Compare commits

...

24 Commits

Author SHA1 Message Date
Siddharth Ganesan
c14c614e33 Consolidation 2026-01-27 12:55:27 -08:00
Siddharth Ganesan
415acda403 Allow run from block for triggers 2026-01-27 12:50:16 -08:00
Siddharth Ganesan
8dc45e6e7e Fix 2026-01-27 12:32:18 -08:00
Siddharth Ganesan
7a0aaa460d Clean up 2026-01-27 12:30:46 -08:00
Siddharth Ganesan
2c333bfd98 Lint 2026-01-27 12:25:27 -08:00
Siddharth Ganesan
23ab11a40d Run until block 2026-01-27 12:13:09 -08:00
Siddharth Ganesan
6e541949ec Change ordering 2026-01-27 11:37:36 -08:00
Siddharth Ganesan
3231955a07 Fix loop logs 2026-01-27 11:36:09 -08:00
Siddharth Ganesan
d38fb29e05 Fix trace spans 2026-01-27 11:21:42 -08:00
Siddharth Ganesan
5c1e620831 Fix 2026-01-27 11:03:34 -08:00
Siddharth Ganesan
72594df766 Minor improvements 2026-01-27 11:03:13 -08:00
Siddharth Ganesan
be95a7dbd8 Fix 2026-01-27 10:33:31 -08:00
Siddharth Ganesan
da5d4ac9d5 Fix 2026-01-26 17:16:35 -08:00
Siddharth Ganesan
e8534bea7a Fixes 2026-01-26 16:40:14 -08:00
Siddharth Ganesan
3d0b810a8e Run from block 2026-01-26 16:19:41 -08:00
Waleed
ebf2852733 fix(copilot): reliable zoom to changed blocks after diff applied (#3011) 2026-01-26 13:54:01 -08:00
Waleed
12495ef89c feat(ci): auto-create github releases and add workflow permissions (#3009) 2026-01-26 13:28:59 -08:00
Waleed
d8d85fccf0 feat(helm): add branding configmap for custom assets (#3008) 2026-01-26 13:19:23 -08:00
Waleed
56bc809c6f fix(docs): separate local and blob asset resolution for quick-reference (#3007)
* fix(docs): separate local and blob asset resolution for quick-reference

ActionImage now uses local paths directly for PNGs while ActionVideo
uses blob storage with proper path normalization (strips static/ prefix).

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>

* refactor(docs): simplify asset resolution by using correct paths directly

Remove path normalization logic from action-media component. Instead,
use the appropriate paths in MDX:
- PNGs: /static/quick-reference/... (local)
- MP4s: quick-reference/... (blob via getAssetUrl)

---------

Co-authored-by: Claude Opus 4.5 <noreply@anthropic.com>
2026-01-26 13:07:11 -08:00
Vikhyath Mondreti
c7bd48573a fix(codegen): function prologue resolution edge cases (#3005)
* fix(codegen): function prologue resolution edge cases

* remove hacky fallback

* case insensitive lookup

* fix python nan and inf resolution

* remove template literal check

* fix tests

* consolidate literal gen
2026-01-26 10:16:13 -08:00
Waleed
80f00479a3 improvement(docs): added images and videos to quick references (#3004)
* improvement(docs): added images and videos to quick references

* moved mp4s to blob, completed quick reference guide
2026-01-25 23:31:40 -08:00
Vikhyath Mondreti
c140e90559 fix(multi-trigger): resolution paths for triggers (#3002)
* fix(multi-trigger): resolution paths for triggers

* fix trigger input format version

* fix output condition logic

* update type guard

* fix
2026-01-25 23:20:42 -08:00
Vikhyath Mondreti
d83c418111 fix(supabase): storage upload + add basic mode version (#2996)
* fix(supabase): storage upload + add basic mode version

* fix subblock update

* remove redundant check in a2a

* add check consistently for baseline diff
2026-01-25 14:19:30 -08:00
Waleed
be2a9ef0f8 fix(storage): support Azure connection string for presigned URLs (#2997)
* fix(docs): update requirements to be more accurate for deploying the app

* updated kb to support 1536 dimension vectors for models other than text embedding 3 small

* fix(storage): support Azure connection string for presigned URLs

* fix(kb): update test for embedding dimensions parameter

* fix(storage): align credential source ordering for consistency
2026-01-25 13:06:12 -08:00
98 changed files with 3098 additions and 740 deletions

View File

@@ -44,7 +44,7 @@ services:
deploy:
resources:
limits:
memory: 4G
memory: 1G
environment:
- NODE_ENV=development
- DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio

View File

@@ -10,6 +10,9 @@ concurrency:
group: ci-${{ github.ref }}
cancel-in-progress: false
permissions:
contents: read
jobs:
test-build:
name: Test and Build
@@ -278,3 +281,30 @@ jobs:
if: needs.check-docs-changes.outputs.docs_changed == 'true'
uses: ./.github/workflows/docs-embeddings.yml
secrets: inherit
# Create GitHub Release (only for version commits on main, after all builds complete)
create-release:
name: Create GitHub Release
runs-on: blacksmith-4vcpu-ubuntu-2404
needs: [create-ghcr-manifests, detect-version]
if: needs.detect-version.outputs.is_release == 'true'
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0
- name: Setup Bun
uses: oven-sh/setup-bun@v2
with:
bun-version: latest
- name: Install dependencies
run: bun install --frozen-lockfile
- name: Create release
env:
GH_PAT: ${{ secrets.GITHUB_TOKEN }}
run: bun run scripts/create-single-release.ts ${{ needs.detect-version.outputs.version }}
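scripts/create-single-release.ts itself is not part of this diff. A minimal sketch of what such a script might do, assuming it takes the version as its first argument and the GH_PAT token the workflow exports, and calls GitHub's create-release REST endpoint (the OWNER/REPO slug is a placeholder):

```ts
// Hypothetical sketch — the real scripts/create-single-release.ts is not shown in this diff.
const version = process.argv[2] // version string passed by the workflow step
const token = process.env.GH_PAT // populated from secrets.GITHUB_TOKEN in ci.yml

if (!version || !token) {
  console.error('Usage: GH_PAT=<token> bun run scripts/create-single-release.ts <version>')
  process.exit(1)
}

// Standard GitHub REST endpoint for creating a release; OWNER/REPO is a placeholder.
const res = await fetch('https://api.github.com/repos/OWNER/REPO/releases', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${token}`,
    Accept: 'application/vnd.github+json',
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    tag_name: version.startsWith('v') ? version : `v${version}`,
    name: version,
    generate_release_notes: true, // let GitHub assemble the changelog
  }),
})

if (!res.ok) {
  console.error(`Release creation failed: ${res.status} ${await res.text()}`)
  process.exit(1)
}
```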

View File

@@ -4,6 +4,9 @@ on:
workflow_call:
workflow_dispatch: # Allow manual triggering
permissions:
contents: read
jobs:
process-docs-embeddings:
name: Process Documentation Embeddings

View File

@@ -4,6 +4,9 @@ on:
workflow_call:
workflow_dispatch:
permissions:
contents: read
jobs:
migrate:
name: Apply Database Migrations

View File

@@ -6,6 +6,9 @@ on:
paths:
- 'packages/cli/**'
permissions:
contents: read
jobs:
publish-npm:
runs-on: blacksmith-4vcpu-ubuntu-2404

View File

@@ -6,6 +6,9 @@ on:
paths:
- 'packages/python-sdk/**'
permissions:
contents: write
jobs:
publish-pypi:
runs-on: blacksmith-4vcpu-ubuntu-2404

View File

@@ -6,6 +6,9 @@ on:
paths:
- 'packages/ts-sdk/**'
permissions:
contents: write
jobs:
publish-npm:
runs-on: blacksmith-4vcpu-ubuntu-2404

View File

@@ -4,6 +4,9 @@ on:
workflow_call:
workflow_dispatch:
permissions:
contents: read
jobs:
test-build:
name: Test and Build

View File

@@ -0,0 +1,38 @@
'use client'
import { getAssetUrl } from '@/lib/utils'
interface ActionImageProps {
src: string
alt: string
}
interface ActionVideoProps {
src: string
alt: string
}
export function ActionImage({ src, alt }: ActionImageProps) {
return (
<img
src={src}
alt={alt}
className='inline-block w-full max-w-[200px] rounded border border-neutral-200 dark:border-neutral-700'
/>
)
}
export function ActionVideo({ src, alt }: ActionVideoProps) {
const resolvedSrc = getAssetUrl(src)
return (
<video
src={resolvedSrc}
autoPlay
loop
muted
playsInline
className='inline-block w-full max-w-[200px] rounded border border-neutral-200 dark:border-neutral-700'
/>
)
}
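getAssetUrl (imported from @/lib/utils) is not included in this diff. A minimal sketch of what it plausibly does, assuming a public blob-storage base URL configured via an environment variable (the variable name here is an assumption):

```ts
// Hypothetical sketch of getAssetUrl — the real @/lib/utils implementation is not in this diff.
// Per the commit message: ActionImage's "/static/..." PNG paths bypass this entirely,
// while ActionVideo's relative MP4 paths (e.g. "quick-reference/add-block.mp4")
// are resolved against blob storage.
const BLOB_BASE_URL = process.env.NEXT_PUBLIC_BLOB_BASE_URL ?? '' // assumed env var name

export function getAssetUrl(path: string): string {
  // Absolute URLs pass through untouched.
  if (/^https?:\/\//.test(path)) return path
  // Join the relative path onto the blob base without duplicating slashes.
  return `${BLOB_BASE_URL.replace(/\/$/, '')}/${path.replace(/^\//, '')}`
}
```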

View File

@@ -10,12 +10,20 @@ Stellen Sie Sim auf Ihrer eigenen Infrastruktur mit Docker oder Kubernetes berei
## Anforderungen
| Ressource | Minimum | Empfohlen |
|----------|---------|-------------|
| CPU | 2 Kerne | 4+ Kerne |
| RAM | 12 GB | 16+ GB |
| Speicher | 20 GB SSD | 50+ GB SSD |
| Docker | 20.10+ | Neueste Version |
| Ressource | Klein | Standard | Produktion |
|----------|-------|----------|------------|
| CPU | 2 Kerne | 4 Kerne | 8+ Kerne |
| RAM | 12 GB | 16 GB | 32+ GB |
| Speicher | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
| Docker | 20.10+ | 20.10+ | Neueste Version |
**Klein**: Entwicklung, Tests, Einzelnutzer (1-5 Nutzer)
**Standard**: Teams (5-50 Nutzer), moderate Arbeitslasten
**Produktion**: Große Teams (50+ Nutzer), Hochverfügbarkeit, intensive Workflow-Ausführung
<Callout type="info">
Die Ressourcenanforderungen werden durch Workflow-Ausführung (isolated-vm Sandboxing), Dateiverarbeitung (In-Memory-Dokumentenparsing) und Vektoroperationen (pgvector) bestimmt. Arbeitsspeicher ist typischerweise der limitierende Faktor, nicht CPU. Produktionsdaten zeigen, dass die Hauptanwendung durchschnittlich 4-8 GB und bei hoher Last bis zu 12 GB benötigt.
</Callout>
## Schnellstart

View File

@@ -4,6 +4,7 @@ description: Essential actions for navigating and using the Sim workflow editor
---
import { Callout } from 'fumadocs-ui/components/callout'
import { ActionImage, ActionVideo } from '@/components/ui/action-media'
A quick lookup for everyday actions in the Sim workflow editor. For keyboard shortcuts, see [Keyboard Shortcuts](/keyboard-shortcuts).
@@ -13,124 +14,362 @@ A quick lookup for everyday actions in the Sim workflow editor. For keyboard sho
## Workspaces
| Action | How |
|--------|-----|
| Create a workspace | Click workspace dropdown in sidebar → **New Workspace** |
| Rename a workspace | Workspace settings → Edit name |
| Switch workspaces | Click workspace dropdown in sidebar → Select workspace |
| Invite team members | Workspace settings → **Team** → **Invite** |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Create a workspace</td>
<td>Click workspace dropdown → **New Workspace**</td>
<td><ActionVideo src="quick-reference/create-workspace.mp4" alt="Create workspace" /></td>
</tr>
<tr>
<td>Switch workspaces</td>
<td>Click workspace dropdown → Select workspace</td>
<td><ActionVideo src="quick-reference/switch-workspace.mp4" alt="Switch workspaces" /></td>
</tr>
<tr>
<td>Invite team members</td>
<td>Sidebar → **Invite**</td>
<td><ActionVideo src="quick-reference/invite.mp4" alt="Invite team members" /></td>
</tr>
<tr>
<td>Rename a workspace</td>
<td>Right-click workspace → **Rename**</td>
<td rowSpan={4}><ActionImage src="/static/quick-reference/workspace-context-menu.png" alt="Workspace context menu" /></td>
</tr>
<tr>
<td>Duplicate a workspace</td>
<td>Right-click workspace → **Duplicate**</td>
</tr>
<tr>
<td>Export a workspace</td>
<td>Right-click workspace → **Export**</td>
</tr>
<tr>
<td>Delete a workspace</td>
<td>Right-click workspace → **Delete**</td>
</tr>
</tbody>
</table>
## Workflows
| Action | How |
|--------|-----|
| Create a workflow | Click **New Workflow** button or `Mod+Shift+A` |
| Rename a workflow | Double-click workflow name in sidebar, or right-click → **Rename** |
| Duplicate a workflow | Right-click workflow → **Duplicate** |
| Reorder workflows | Drag workflow up/down in the sidebar list |
| Import a workflow | Sidebar menu → **Import** → Select file |
| Create a folder | Right-click in sidebar → **New Folder** |
| Rename a folder | Right-click folder → **Rename** |
| Delete a folder | Right-click folder → **Delete** |
| Collapse/expand folder | Click folder arrow, or double-click folder |
| Move workflow to folder | Drag workflow onto folder in sidebar |
| Delete a workflow | Right-click workflow → **Delete** |
| Export a workflow | Right-click workflow → **Export** |
| Assign workflow color | Right-click workflow → **Change Color** |
| Multi-select workflows | `Mod+Click` or `Shift+Click` workflows in sidebar |
| Open in new tab | Right-click workflow → **Open in New Tab** |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Create a workflow</td>
<td>Click **+** button in sidebar</td>
<td><ActionImage src="/static/quick-reference/create-workflow.png" alt="Create workflow" /></td>
</tr>
<tr>
<td>Reorder / move workflows</td>
<td>Drag workflow up/down or onto a folder</td>
<td><ActionVideo src="quick-reference/reordering.mp4" alt="Reorder workflows" /></td>
</tr>
<tr>
<td>Import a workflow</td>
<td>Click import button in sidebar → Select file</td>
<td><ActionImage src="/static/quick-reference/import-workflow.png" alt="Import workflow" /></td>
</tr>
<tr>
<td>Multi-select workflows</td>
<td>`Mod+Click` or `Shift+Click` workflows in sidebar</td>
<td><ActionVideo src="quick-reference/multiselect.mp4" alt="Multi-select workflows" /></td>
</tr>
<tr>
<td>Open in new tab</td>
<td>Right-click workflow → **Open in New Tab**</td>
<td rowSpan={6}><ActionImage src="/static/quick-reference/workflow-context-menu.png" alt="Workflow context menu" /></td>
</tr>
<tr>
<td>Rename a workflow</td>
<td>Right-click workflow → **Rename**</td>
</tr>
<tr>
<td>Assign workflow color</td>
<td>Right-click workflow → **Change Color**</td>
</tr>
<tr>
<td>Duplicate a workflow</td>
<td>Right-click workflow → **Duplicate**</td>
</tr>
<tr>
<td>Export a workflow</td>
<td>Right-click workflow → **Export**</td>
</tr>
<tr>
<td>Delete a workflow</td>
<td>Right-click workflow → **Delete**</td>
</tr>
<tr>
<td>Rename a folder</td>
<td>Right-click folder → **Rename**</td>
<td rowSpan={6}><ActionImage src="/static/quick-reference/folder-context-menu.png" alt="Folder context menu" /></td>
</tr>
<tr>
<td>Create workflow in folder</td>
<td>Right-click folder → **Create workflow**</td>
</tr>
<tr>
<td>Create folder in folder</td>
<td>Right-click folder → **Create folder**</td>
</tr>
<tr>
<td>Duplicate a folder</td>
<td>Right-click folder → **Duplicate**</td>
</tr>
<tr>
<td>Export a folder</td>
<td>Right-click folder → **Export**</td>
</tr>
<tr>
<td>Delete a folder</td>
<td>Right-click folder → **Delete**</td>
</tr>
</tbody>
</table>
## Blocks
| Action | How |
|--------|-----|
| Add a block | Drag from Toolbar panel, or right-click canvas → **Add Block** |
| Select a block | Click on the block |
| Multi-select blocks | `Mod+Click` additional blocks, or right-drag to draw selection box |
| Move blocks | Drag selected block(s) to new position |
| Copy blocks | `Mod+C` with blocks selected |
| Paste blocks | `Mod+V` to paste copied blocks |
| Duplicate blocks | Right-click → **Duplicate** |
| Delete blocks | `Delete` or `Backspace` key, or right-click → **Delete** |
| Rename a block | Click block name in header, or edit in the Editor panel |
| Enable/Disable a block | Right-click → **Enable/Disable** |
| Toggle handle orientation | Right-click → **Toggle Handles** |
| Toggle trigger mode | Right-click trigger block → **Toggle Trigger Mode** |
| Configure a block | Select block → use Editor panel on right |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Add a block</td>
<td>Drag from Toolbar panel, or right-click canvas → **Add Block**</td>
<td><ActionVideo src="quick-reference/add-block.mp4" alt="Add a block" /></td>
</tr>
<tr>
<td>Multi-select blocks</td>
<td>`Mod+Click` additional blocks, or shift-drag to draw selection box</td>
<td><ActionVideo src="quick-reference/multiselect-blocks.mp4" alt="Multi-select blocks" /></td>
</tr>
<tr>
<td>Copy blocks</td>
<td>`Mod+C` with blocks selected</td>
<td rowSpan={2}><ActionVideo src="quick-reference/copy-paste.mp4" alt="Copy and paste blocks" /></td>
</tr>
<tr>
<td>Paste blocks</td>
<td>`Mod+V` to paste copied blocks</td>
</tr>
<tr>
<td>Duplicate blocks</td>
<td>Right-click → **Duplicate**</td>
<td><ActionVideo src="quick-reference/duplicate-block.mp4" alt="Duplicate blocks" /></td>
</tr>
<tr>
<td>Delete blocks</td>
<td>`Delete` or `Backspace` key, or right-click → **Delete**</td>
<td><ActionImage src="/static/quick-reference/delete-block.png" alt="Delete block" /></td>
</tr>
<tr>
<td>Rename a block</td>
<td>Click block name in header, or edit in the Editor panel</td>
<td><ActionVideo src="quick-reference/rename-block.mp4" alt="Rename a block" /></td>
</tr>
<tr>
<td>Enable/Disable a block</td>
<td>Right-click → **Enable/Disable**</td>
<td><ActionImage src="/static/quick-reference/disable-block.png" alt="Disable block" /></td>
</tr>
<tr>
<td>Toggle handle orientation</td>
<td>Right-click → **Toggle Handles**</td>
<td><ActionVideo src="quick-reference/toggle-handles.mp4" alt="Toggle handle orientation" /></td>
</tr>
<tr>
<td>Configure a block</td>
<td>Select block → use Editor panel on right</td>
<td><ActionVideo src="quick-reference/configure-block.mp4" alt="Configure a block" /></td>
</tr>
</tbody>
</table>
## Connections
| Action | How |
|--------|-----|
| Create a connection | Drag from output handle to input handle |
| Delete a connection | Click edge to select → `Delete` key |
| Use output in another block | Drag connection tag into input field |
## Canvas Navigation
| Action | How |
|--------|-----|
| Pan/move canvas | Left-drag on empty space, or scroll/trackpad |
| Zoom in/out | Scroll wheel or pinch gesture |
| Auto-layout | `Shift+L` |
| Draw selection box | Right-drag on empty canvas area |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Create a connection</td>
<td>Drag from output handle to input handle</td>
<td><ActionVideo src="quick-reference/connect-blocks.mp4" alt="Connect blocks" /></td>
</tr>
<tr>
<td>Delete a connection</td>
<td>Click edge to select → `Delete` key</td>
<td><ActionVideo src="quick-reference/delete-connection.mp4" alt="Delete connection" /></td>
</tr>
<tr>
<td>Use output in another block</td>
<td>Drag connection tag into input field</td>
<td><ActionVideo src="quick-reference/connection-tag.mp4" alt="Use connection tag" /></td>
</tr>
</tbody>
</table>
## Panels & Views
| Action | How |
|--------|-----|
| Open Copilot tab | Press `C` or click Copilot tab |
| Open Toolbar tab | Press `T` or click Toolbar tab |
| Open Editor tab | Press `E` or click Editor tab |
| Search toolbar | `Mod+F` |
| Toggle advanced mode | Click toggle button on input fields |
| Resize panels | Drag panel edge |
| Collapse/expand sidebar | Click collapse button on sidebar |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Search toolbar</td>
<td>`Mod+F`</td>
<td><ActionVideo src="quick-reference/search-toolbar.mp4" alt="Search toolbar" /></td>
</tr>
<tr>
<td>Search everything</td>
<td>`Mod+K`</td>
<td><ActionImage src="/static/quick-reference/search-everything.png" alt="Search everything" /></td>
</tr>
<tr>
<td>Toggle manual mode</td>
<td>Click toggle button to switch between manual and selector modes</td>
<td><ActionImage src="/static/quick-reference/toggle-manual-mode.png" alt="Toggle manual mode" /></td>
</tr>
<tr>
<td>Collapse/expand sidebar</td>
<td>Click collapse button on sidebar</td>
<td><ActionVideo src="quick-reference/collapse-sidebar.mp4" alt="Collapse sidebar" /></td>
</tr>
</tbody>
</table>
## Running & Testing
| Action | How |
|--------|-----|
| Run workflow | Click Play button or `Mod+Enter` |
| Stop workflow | Click Stop button or `Mod+Enter` while running |
| Test with chat | Use Chat panel on the right side |
| Select output to view | Click dropdown in Chat panel → Select block output |
| Clear chat history | Click clear button in Chat panel |
| View execution logs | Open terminal panel at bottom, or `Mod+L` |
| Filter logs by block | Click block filter in terminal |
| Filter logs by status | Click status filter in terminal |
| Search logs | Use search field in terminal |
| Copy log entry | Right-click log entry → **Copy** |
| Clear terminal | `Mod+D` |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Run workflow</td>
<td>Click Run Workflow button or `Mod+Enter`</td>
<td><ActionImage src="/static/quick-reference/run-workflow.png" alt="Run workflow" /></td>
</tr>
<tr>
<td>Stop workflow</td>
<td>Click Stop button or `Mod+Enter` while running</td>
<td><ActionImage src="/static/quick-reference/stop-workflow.png" alt="Stop workflow" /></td>
</tr>
<tr>
<td>Test with chat</td>
<td>Use Chat panel on the right side</td>
<td><ActionImage src="/static/quick-reference/test-chat.png" alt="Test with chat" /></td>
</tr>
<tr>
<td>Select output to view</td>
<td>Click dropdown in Chat panel → Select block output</td>
<td><ActionImage src="/static/quick-reference/output-select.png" alt="Select output to view" /></td>
</tr>
<tr>
<td>Clear chat history</td>
<td>Click clear button in Chat panel</td>
<td><ActionImage src="/static/quick-reference/clear-chat.png" alt="Clear chat history" /></td>
</tr>
<tr>
<td>View execution logs</td>
<td>Open terminal panel at bottom, or `Mod+L`</td>
<td><ActionImage src="/static/quick-reference/terminal.png" alt="Execution logs terminal" /></td>
</tr>
<tr>
<td>Filter logs by block or status</td>
<td>Click block filter in terminal or right-click log entry → **Filter by Block** or **Filter by Status**</td>
<td><ActionImage src="/static/quick-reference/filter-block.png" alt="Filter logs by block" /></td>
</tr>
<tr>
<td>Search logs</td>
<td>Use search field in terminal or right-click log entry → **Search**</td>
<td><ActionImage src="/static/quick-reference/terminal-search.png" alt="Search logs" /></td>
</tr>
<tr>
<td>Copy log entry</td>
<td>Clipboard icon, or right-click log entry → **Copy**</td>
<td><ActionImage src="/static/quick-reference/copy-log.png" alt="Copy log entry" /></td>
</tr>
<tr>
<td>Clear terminal</td>
<td>Trash icon or `Mod+D`</td>
<td><ActionImage src="/static/quick-reference/clear-terminal.png" alt="Clear terminal" /></td>
</tr>
</tbody>
</table>
## Deployment
| Action | How |
|--------|-----|
| Deploy a workflow | Click **Deploy** button in Deploy tab |
| Update deployment | Click **Update** when changes are detected |
| View deployment status | Check status indicator (Live/Update/Deploy) in Deploy tab |
| Revert deployment | Access previous versions in Deploy tab |
| Copy webhook URL | Deploy tab → Copy webhook URL |
| Copy API endpoint | Deploy tab → Copy API endpoint URL |
| Set up a schedule | Add Schedule trigger block → Configure interval |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Deploy a workflow</td>
<td>Click **Deploy** button in panel</td>
<td><ActionImage src="/static/quick-reference/deploy.png" alt="Deploy workflow" /></td>
</tr>
<tr>
<td>Update deployment</td>
<td>Click **Update** when changes are detected</td>
<td><ActionImage src="/static/quick-reference/update-deployment.png" alt="Update deployment" /></td>
</tr>
<tr>
<td>View deployment status</td>
<td>Check status indicator (Live/Update/Deploy) in Deploy tab</td>
<td><ActionImage src="/static/quick-reference/view-deployment.png" alt="View deployment status" /></td>
</tr>
<tr>
<td>Revert deployment</td>
<td>Access previous versions in Deploy tab → **Promote to live**</td>
<td><ActionImage src="/static/quick-reference/promote-deployment.png" alt="Promote deployment to live" /></td>
</tr>
<tr>
<td>Copy API endpoint</td>
<td>Deploy tab → API → Copy API cURL</td>
<td><ActionImage src="/static/quick-reference/copy-api.png" alt="Copy API endpoint" /></td>
</tr>
</tbody>
</table>
## Variables
| Action | How |
|--------|-----|
| Add workflow variable | Variables tab → **Add Variable** |
| Edit workflow variable | Variables tab → Click variable to edit |
| Delete workflow variable | Variables tab → Click delete icon on variable |
| Add environment variable | Settings → **Environment Variables** → **Add** |
| Reference a variable | Use `{{variableName}}` syntax in block inputs |
## Credentials
| Action | How |
|--------|-----|
| Add API key | Block credential field → **Add Credential** → Enter API key |
| Connect OAuth account | Block credential field → **Connect** → Authorize with provider |
| Manage credentials | Settings → **Credentials** |
| Remove credential | Settings → **Credentials** → Delete credential |
<table>
<thead>
<tr><th>Action</th><th>How</th><th>Preview</th></tr>
</thead>
<tbody>
<tr>
<td>Add / Edit / Delete workflow variable</td>
<td>Panel → Variables → **Add Variable**, click to edit, or delete icon</td>
<td><ActionImage src="/static/quick-reference/variables.png" alt="Variables panel" /></td>
</tr>
<tr>
<td>Add environment variable</td>
<td>Settings → **Environment Variables** → **Add**</td>
<td><ActionImage src="/static/quick-reference/add-env-variable.png" alt="Add environment variable" /></td>
</tr>
<tr>
<td>Reference a workflow variable</td>
<td>Use `<blockName.itemName>` syntax in block inputs</td>
<td><ActionImage src="/static/quick-reference/variable-reference.png" alt="Reference workflow variable" /></td>
</tr>
<tr>
<td>Reference an environment variable</td>
<td>Use `{{ENV_VAR}}` syntax in block inputs</td>
<td><ActionImage src="/static/quick-reference/env-variable-reference.png" alt="Reference environment variable" /></td>
</tr>
</tbody>
</table>

View File

@@ -16,12 +16,20 @@ Deploy Sim on your own infrastructure with Docker or Kubernetes.
## Requirements
| Resource | Minimum | Recommended |
|----------|---------|-------------|
| CPU | 2 cores | 4+ cores |
| RAM | 12 GB | 16+ GB |
| Storage | 20 GB SSD | 50+ GB SSD |
| Docker | 20.10+ | Latest |
| Resource | Small | Standard | Production |
|----------|-------|----------|------------|
| CPU | 2 cores | 4 cores | 8+ cores |
| RAM | 12 GB | 16 GB | 32+ GB |
| Storage | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
| Docker | 20.10+ | 20.10+ | Latest |
**Small**: Development, testing, single user (1-5 users)
**Standard**: Teams (5-50 users), moderate workloads
**Production**: Large teams (50+ users), high availability, heavy workflow execution
<Callout type="info">
Resource requirements are driven by workflow execution (isolated-vm sandboxing), file processing (in-memory document parsing), and vector operations (pgvector). Memory is typically the constraining factor rather than CPU. Production telemetry shows the main app uses 4-8 GB average with peaks up to 12 GB under heavy load.
</Callout>
## Quick Start

View File

@@ -10,12 +10,20 @@ Despliega Sim en tu propia infraestructura con Docker o Kubernetes.
## Requisitos
| Recurso | Mínimo | Recomendado |
|----------|---------|-------------|
| CPU | 2 núcleos | 4+ núcleos |
| RAM | 12 GB | 16+ GB |
| Almacenamiento | 20 GB SSD | 50+ GB SSD |
| Docker | 20.10+ | Última versión |
| Recurso | Pequeño | Estándar | Producción |
|----------|---------|----------|------------|
| CPU | 2 núcleos | 4 núcleos | 8+ núcleos |
| RAM | 12 GB | 16 GB | 32+ GB |
| Almacenamiento | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
| Docker | 20.10+ | 20.10+ | Última versión |
**Pequeño**: Desarrollo, pruebas, usuario único (1-5 usuarios)
**Estándar**: Equipos (5-50 usuarios), cargas de trabajo moderadas
**Producción**: Equipos grandes (50+ usuarios), alta disponibilidad, ejecución intensiva de workflows
<Callout type="info">
Los requisitos de recursos están determinados por la ejecución de workflows (sandboxing isolated-vm), procesamiento de archivos (análisis de documentos en memoria) y operaciones vectoriales (pgvector). La memoria suele ser el factor limitante, no la CPU. La telemetría de producción muestra que la aplicación principal usa 4-8 GB en promedio con picos de hasta 12 GB bajo carga pesada.
</Callout>
## Inicio rápido

View File

@@ -10,12 +10,20 @@ Déployez Sim sur votre propre infrastructure avec Docker ou Kubernetes.
## Prérequis
| Ressource | Minimum | Recommandé |
|----------|---------|-------------|
| CPU | 2 cœurs | 4+ cœurs |
| RAM | 12 Go | 16+ Go |
| Stockage | 20 Go SSD | 50+ Go SSD |
| Docker | 20.10+ | Dernière version |
| Ressource | Petit | Standard | Production |
|----------|-------|----------|------------|
| CPU | 2 cœurs | 4 cœurs | 8+ cœurs |
| RAM | 12 Go | 16 Go | 32+ Go |
| Stockage | 20 Go SSD | 50 Go SSD | 100+ Go SSD |
| Docker | 20.10+ | 20.10+ | Dernière version |
**Petit** : Développement, tests, utilisateur unique (1-5 utilisateurs)
**Standard** : Équipes (5-50 utilisateurs), charges de travail modérées
**Production** : Grandes équipes (50+ utilisateurs), haute disponibilité, exécution intensive de workflows
<Callout type="info">
Les besoins en ressources sont déterminés par l'exécution des workflows (sandboxing isolated-vm), le traitement des fichiers (analyse de documents en mémoire) et les opérations vectorielles (pgvector). La mémoire est généralement le facteur limitant, pas le CPU. La télémétrie de production montre que l'application principale utilise 4-8 Go en moyenne avec des pics jusqu'à 12 Go sous forte charge.
</Callout>
## Démarrage rapide

View File

@@ -10,12 +10,20 @@ DockerまたはKubernetesを使用して、自社のインフラストラクチ
## 要件
| リソース | 最小 | 推奨 |
|----------|---------|-------------|
| CPU | 2コア | 4+コア |
| RAM | 12 GB | 16+ GB |
| ストレージ | 20 GB SSD | 50+ GB SSD |
| Docker | 20.10+ | 最新版 |
| リソース | スモール | スタンダード | プロダクション |
|----------|---------|-------------|----------------|
| CPU | 2コア | 4コア | 8+コア |
| RAM | 12 GB | 16 GB | 32+ GB |
| ストレージ | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
| Docker | 20.10+ | 20.10+ | 最新版 |
**スモール**: 開発、テスト、シングルユーザー(1-5ユーザー)
**スタンダード**: チーム(5-50ユーザー)、中程度のワークロード
**プロダクション**: 大規模チーム(50+ユーザー)、高可用性、高負荷ワークフロー実行
<Callout type="info">
リソース要件は、ワークフロー実行(isolated-vmサンドボックス)、ファイル処理(メモリ内ドキュメント解析)、ベクトル演算(pgvector)によって決まります。CPUよりもメモリが制約要因となることが多いです。本番環境のテレメトリによると、メインアプリは平均4-8 GB、高負荷時は最大12 GBを使用します。
</Callout>
## クイックスタート

View File

@@ -10,12 +10,20 @@ import { Callout } from 'fumadocs-ui/components/callout'
## 要求
| 资源 | 最低要求 | 推荐配置 |
|----------|---------|-------------|
| CPU | 2 核 | 4 核及以上 |
| 内存 | 12 GB | 16 GB 及以上 |
| 存储 | 20 GB SSD | 50 GB 及以上 SSD |
| Docker | 20.10+ | 最新版本 |
| 资源 | 小型 | 标准 | 生产环境 |
|----------|------|------|----------|
| CPU | 2 核 | 4 核 | 8+ 核 |
| 内存 | 12 GB | 16 GB | 32+ GB |
| 存储 | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
| Docker | 20.10+ | 20.10+ | 最新版本 |
**小型**: 开发、测试、单用户(1-5 用户)
**标准**: 团队(5-50 用户)、中等工作负载
**生产环境**: 大型团队(50+ 用户)、高可用性、密集工作流执行
<Callout type="info">
资源需求由工作流执行(isolated-vm 沙箱)、文件处理(内存中文档解析)和向量运算(pgvector)决定。内存通常是限制因素,而不是 CPU。生产遥测数据显示,主应用平均使用 4-8 GB,高负载时峰值可达 12 GB。
</Callout>
## 快速开始

29 binary image files not shown (new quick-reference screenshots, 5.9 KiB – 146 KiB each).

View File

@@ -8,6 +8,7 @@ import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
import { type OutputSchema, resolveBlockReference } from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
import {
createEnvVarPattern,
createWorkflowVariablePattern,
@@ -387,7 +388,12 @@ function resolveWorkflowVariables(
if (type === 'number') {
variableValue = Number(variableValue)
} else if (type === 'boolean') {
variableValue = variableValue === 'true' || variableValue === true
if (typeof variableValue === 'boolean') {
// Already a boolean, keep as-is
} else {
const normalized = String(variableValue).toLowerCase().trim()
variableValue = normalized === 'true'
}
} else if (type === 'json' && typeof variableValue === 'string') {
try {
variableValue = JSON.parse(variableValue)
@@ -687,11 +693,7 @@ export async function POST(req: NextRequest) {
prologue += `const environmentVariables = JSON.parse(${JSON.stringify(JSON.stringify(envVars))});\n`
prologueLineCount++
for (const [k, v] of Object.entries(contextVariables)) {
if (v === undefined) {
prologue += `const ${k} = undefined;\n`
} else {
prologue += `const ${k} = JSON.parse(${JSON.stringify(JSON.stringify(v))});\n`
}
prologue += `const ${k} = ${formatLiteralForCode(v, 'javascript')};\n`
prologueLineCount++
}
@@ -762,11 +764,7 @@ export async function POST(req: NextRequest) {
prologue += `environmentVariables = json.loads(${JSON.stringify(JSON.stringify(envVars))})\n`
prologueLineCount++
for (const [k, v] of Object.entries(contextVariables)) {
if (v === undefined) {
prologue += `${k} = None\n`
} else {
prologue += `${k} = json.loads(${JSON.stringify(JSON.stringify(v))})\n`
}
prologue += `${k} = ${formatLiteralForCode(v, 'python')}\n`
prologueLineCount++
}
const wrapped = [
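formatLiteralForCode comes from @/executor/utils/code-formatting, which this diff does not include. Judging from the inline logic it replaces and the "fix python nan and inf resolution" / "consolidate literal gen" commits, a plausible sketch:

```ts
// Hypothetical sketch — the real @/executor/utils/code-formatting is not in this diff.
export function formatLiteralForCode(value: unknown, language: 'javascript' | 'python'): string {
  if (value === undefined) {
    return language === 'python' ? 'None' : 'undefined'
  }
  if (typeof value === 'number' && !Number.isFinite(value)) {
    // JSON.stringify cannot represent NaN/Infinity, so special-case them.
    if (language === 'python') {
      return Number.isNaN(value) ? "float('nan')" : value > 0 ? "float('inf')" : "float('-inf')"
    }
    return String(value) // NaN, Infinity, -Infinity are valid JavaScript literals
  }
  // Otherwise match the old inline behavior: double-stringify and parse at runtime.
  const json = JSON.stringify(JSON.stringify(value))
  return language === 'python' ? `json.loads(${json})` : `JSON.parse(${json})`
}
```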

View File

@@ -408,6 +408,7 @@ describe('Knowledge Search Utils', () => {
input: ['test query'],
model: 'text-embedding-3-small',
encoding_format: 'float',
dimensions: 1536,
}),
})
)
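The assertion above pins the new dimensions parameter. For reference, a sketch of the request the knowledge-base code presumably issues — the endpoint and fields are the standard OpenAI embeddings API, matching the asserted body:

```ts
// Sketch of the embeddings request the test asserts on. The dimensions field is
// the now-explicit parameter so models other than text-embedding-3-small can
// still return 1536-dimension vectors.
const response = await fetch('https://api.openai.com/v1/embeddings', {
  method: 'POST',
  headers: {
    Authorization: `Bearer ${process.env.OPENAI_API_KEY}`,
    'Content-Type': 'application/json',
  },
  body: JSON.stringify({
    input: ['test query'],
    model: 'text-embedding-3-small',
    encoding_format: 'float',
    dimensions: 1536,
  }),
})
const { data } = await response.json() // data[0].embedding is the 1536-float vector
```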

View File

@@ -1,204 +0,0 @@
import { db } from '@sim/db'
import { member, permissions, user, workspace } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
const logger = createLogger('OrganizationWorkspacesAPI')
/**
* GET /api/organizations/[id]/workspaces
* Get workspaces related to the organization with optional filtering
* Query parameters:
* - ?available=true - Only workspaces where user can invite others (admin permissions)
* - ?member=userId - Workspaces where specific member has access
*/
export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
try {
const session = await getSession()
if (!session?.user?.id) {
return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
}
const { id: organizationId } = await params
const url = new URL(request.url)
const availableOnly = url.searchParams.get('available') === 'true'
const memberId = url.searchParams.get('member')
// Verify user is a member of this organization
const memberEntry = await db
.select()
.from(member)
.where(and(eq(member.organizationId, organizationId), eq(member.userId, session.user.id)))
.limit(1)
if (memberEntry.length === 0) {
return NextResponse.json(
{
error: 'Forbidden - Not a member of this organization',
},
{ status: 403 }
)
}
const userRole = memberEntry[0].role
const hasAdminAccess = ['owner', 'admin'].includes(userRole)
if (availableOnly) {
// Get workspaces where user has admin permissions (can invite others)
const availableWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
createdAt: workspace.createdAt,
isOwner: eq(workspace.ownerId, session.user.id),
permissionType: permissions.permissionType,
})
.from(workspace)
.leftJoin(
permissions,
and(
eq(permissions.entityType, 'workspace'),
eq(permissions.entityId, workspace.id),
eq(permissions.userId, session.user.id)
)
)
.where(
or(
// User owns the workspace
eq(workspace.ownerId, session.user.id),
// User has admin permission on the workspace
and(
eq(permissions.userId, session.user.id),
eq(permissions.entityType, 'workspace'),
eq(permissions.permissionType, 'admin')
)
)
)
// Filter and format the results
const workspacesWithInvitePermission = availableWorkspaces
.filter((workspace) => {
// Include if user owns the workspace OR has admin permission
return workspace.isOwner || workspace.permissionType === 'admin'
})
.map((workspace) => ({
id: workspace.id,
name: workspace.name,
isOwner: workspace.isOwner,
canInvite: true, // All returned workspaces have invite permission
createdAt: workspace.createdAt,
}))
logger.info('Retrieved available workspaces for organization member', {
organizationId,
userId: session.user.id,
workspaceCount: workspacesWithInvitePermission.length,
})
return NextResponse.json({
success: true,
data: {
workspaces: workspacesWithInvitePermission,
totalCount: workspacesWithInvitePermission.length,
filter: 'available',
},
})
}
if (memberId && hasAdminAccess) {
// Get workspaces where specific member has access (admin only)
const memberWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
isOwner: eq(workspace.ownerId, memberId),
permissionType: permissions.permissionType,
createdAt: permissions.createdAt,
})
.from(workspace)
.leftJoin(
permissions,
and(
eq(permissions.entityType, 'workspace'),
eq(permissions.entityId, workspace.id),
eq(permissions.userId, memberId)
)
)
.where(
or(
// Member owns the workspace
eq(workspace.ownerId, memberId),
// Member has permissions on the workspace
and(eq(permissions.userId, memberId), eq(permissions.entityType, 'workspace'))
)
)
const formattedWorkspaces = memberWorkspaces.map((workspace) => ({
id: workspace.id,
name: workspace.name,
isOwner: workspace.isOwner,
permission: workspace.permissionType,
joinedAt: workspace.createdAt,
createdAt: workspace.createdAt,
}))
return NextResponse.json({
success: true,
data: {
workspaces: formattedWorkspaces,
totalCount: formattedWorkspaces.length,
filter: 'member',
memberId,
},
})
}
// Default: Get all workspaces (basic info only for regular members)
if (!hasAdminAccess) {
return NextResponse.json({
success: true,
data: {
workspaces: [],
totalCount: 0,
message: 'Workspace access information is only available to organization admins',
},
})
}
// For admins: Get summary of all workspaces
const allWorkspaces = await db
.select({
id: workspace.id,
name: workspace.name,
ownerId: workspace.ownerId,
createdAt: workspace.createdAt,
ownerName: user.name,
})
.from(workspace)
.leftJoin(user, eq(workspace.ownerId, user.id))
return NextResponse.json({
success: true,
data: {
workspaces: allWorkspaces,
totalCount: allWorkspaces.length,
filter: 'all',
},
userRole,
hasAdminAccess,
})
} catch (error) {
logger.error('Failed to get organization workspaces', { error })
return NextResponse.json(
{
error: 'Internal server error',
},
{ status: 500 }
)
}
}

View File

@@ -0,0 +1,257 @@
import { createLogger } from '@sim/logger'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { checkInternalAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { processSingleFileToUserFile } from '@/lib/uploads/utils/file-utils'
import { downloadFileFromStorage } from '@/lib/uploads/utils/file-utils.server'
export const dynamic = 'force-dynamic'
const logger = createLogger('SupabaseStorageUploadAPI')
const SupabaseStorageUploadSchema = z.object({
projectId: z.string().min(1, 'Project ID is required'),
apiKey: z.string().min(1, 'API key is required'),
bucket: z.string().min(1, 'Bucket name is required'),
fileName: z.string().min(1, 'File name is required'),
path: z.string().optional().nullable(),
fileData: z.any(),
contentType: z.string().optional().nullable(),
upsert: z.boolean().optional().default(false),
})
export async function POST(request: NextRequest) {
const requestId = generateRequestId()
try {
const authResult = await checkInternalAuth(request, { requireWorkflowId: false })
if (!authResult.success) {
logger.warn(
`[${requestId}] Unauthorized Supabase storage upload attempt: ${authResult.error}`
)
return NextResponse.json(
{
success: false,
error: authResult.error || 'Authentication required',
},
{ status: 401 }
)
}
logger.info(
`[${requestId}] Authenticated Supabase storage upload request via ${authResult.authType}`,
{
userId: authResult.userId,
}
)
const body = await request.json()
const validatedData = SupabaseStorageUploadSchema.parse(body)
const fileData = validatedData.fileData
const isStringInput = typeof fileData === 'string'
logger.info(`[${requestId}] Uploading to Supabase Storage`, {
bucket: validatedData.bucket,
fileName: validatedData.fileName,
path: validatedData.path,
fileDataType: isStringInput ? 'string' : 'object',
})
if (!fileData) {
return NextResponse.json(
{
success: false,
error: 'fileData is required',
},
{ status: 400 }
)
}
let uploadBody: Buffer
let uploadContentType: string | undefined
if (isStringInput) {
let content = fileData as string
const dataUrlMatch = content.match(/^data:([^;]+);base64,(.+)$/s)
if (dataUrlMatch) {
const [, mimeType, base64Data] = dataUrlMatch
content = base64Data
if (!validatedData.contentType) {
uploadContentType = mimeType
}
logger.info(`[${requestId}] Extracted base64 from data URL (MIME: ${mimeType})`)
}
const cleanedContent = content.replace(/[\s\r\n]/g, '')
const isLikelyBase64 = /^[A-Za-z0-9+/]*={0,2}$/.test(cleanedContent)
if (isLikelyBase64 && cleanedContent.length >= 4) {
try {
uploadBody = Buffer.from(cleanedContent, 'base64')
const expectedMinSize = Math.floor(cleanedContent.length * 0.7)
const expectedMaxSize = Math.ceil(cleanedContent.length * 0.8)
if (
uploadBody.length >= expectedMinSize &&
uploadBody.length <= expectedMaxSize &&
uploadBody.length > 0
) {
logger.info(
`[${requestId}] Decoded base64 content: ${cleanedContent.length} chars -> ${uploadBody.length} bytes`
)
} else {
const reEncoded = uploadBody.toString('base64')
if (reEncoded !== cleanedContent) {
logger.info(
`[${requestId}] Content looked like base64 but re-encoding didn't match, using as plain text`
)
uploadBody = Buffer.from(content, 'utf-8')
} else {
logger.info(
`[${requestId}] Decoded base64 content (verified): ${uploadBody.length} bytes`
)
}
}
} catch (decodeError) {
logger.info(
`[${requestId}] Failed to decode as base64, using as plain text: ${decodeError}`
)
uploadBody = Buffer.from(content, 'utf-8')
}
} else {
uploadBody = Buffer.from(content, 'utf-8')
logger.info(`[${requestId}] Using content as plain text (${uploadBody.length} bytes)`)
}
uploadContentType =
uploadContentType || validatedData.contentType || 'application/octet-stream'
} else {
const rawFile = fileData
logger.info(`[${requestId}] Processing file object: ${rawFile.name || 'unknown'}`)
let userFile
try {
userFile = processSingleFileToUserFile(rawFile, requestId, logger)
} catch (error) {
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Failed to process file',
},
{ status: 400 }
)
}
const buffer = await downloadFileFromStorage(userFile, requestId, logger)
uploadBody = buffer
uploadContentType = validatedData.contentType || userFile.type || 'application/octet-stream'
}
let fullPath = validatedData.fileName
if (validatedData.path) {
const folderPath = validatedData.path.endsWith('/')
? validatedData.path
: `${validatedData.path}/`
fullPath = `${folderPath}${validatedData.fileName}`
}
const supabaseUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/${validatedData.bucket}/${fullPath}`
const headers: Record<string, string> = {
apikey: validatedData.apiKey,
Authorization: `Bearer ${validatedData.apiKey}`,
'Content-Type': uploadContentType,
}
if (validatedData.upsert) {
headers['x-upsert'] = 'true'
}
logger.info(`[${requestId}] Sending to Supabase: ${supabaseUrl}`, {
contentType: uploadContentType,
bodySize: uploadBody.length,
upsert: validatedData.upsert,
})
const response = await fetch(supabaseUrl, {
method: 'POST',
headers,
body: new Uint8Array(uploadBody),
})
if (!response.ok) {
const errorText = await response.text()
let errorData
try {
errorData = JSON.parse(errorText)
} catch {
errorData = { message: errorText }
}
logger.error(`[${requestId}] Supabase Storage upload failed:`, {
status: response.status,
statusText: response.statusText,
error: errorData,
})
return NextResponse.json(
{
success: false,
error: errorData.message || errorData.error || `Upload failed: ${response.statusText}`,
details: errorData,
},
{ status: response.status }
)
}
const result = await response.json()
logger.info(`[${requestId}] File uploaded successfully to Supabase Storage`, {
bucket: validatedData.bucket,
path: fullPath,
})
const publicUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/public/${validatedData.bucket}/${fullPath}`
return NextResponse.json({
success: true,
output: {
message: 'Successfully uploaded file to storage',
results: {
...result,
path: fullPath,
bucket: validatedData.bucket,
publicUrl,
},
},
})
} catch (error) {
if (error instanceof z.ZodError) {
logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
return NextResponse.json(
{
success: false,
error: 'Invalid request data',
details: error.errors,
},
{ status: 400 }
)
}
logger.error(`[${requestId}] Error uploading to Supabase Storage:`, error)
return NextResponse.json(
{
success: false,
error: error instanceof Error ? error.message : 'Internal server error',
},
{ status: 500 }
)
}
}
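A hedged example of calling this route. The URL is an assumption (it depends on where the file sits under app/); the payload fields mirror SupabaseStorageUploadSchema, and the fileData here is base64 for "Hello world", which the handler decodes server-side:

```ts
// Example invocation of the Supabase storage upload route above.
// The route path is assumed for illustration.
const res = await fetch('/api/tools/supabase/storage/upload', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    projectId: 'abcd1234', // the <project>.supabase.co subdomain
    apiKey: process.env.SUPABASE_SERVICE_KEY,
    bucket: 'exports',
    fileName: 'hello.txt',
    path: 'reports', // stored as reports/hello.txt
    fileData: 'SGVsbG8gd29ybGQ=', // base64 "Hello world", decoded by the handler
    upsert: true, // sets the x-upsert header on the Supabase request
  }),
})
const { output } = await res.json() // output.results.publicUrl on success
```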

View File

@@ -0,0 +1,305 @@
import { db, workflow as workflowTable } from '@sim/db'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { ExecutionSnapshot } from '@/executor/execution/snapshot'
import type { IterationContext, SerializableExecutionState } from '@/executor/execution/types'
import type { NormalizedBlockOutput } from '@/executor/types'
import { hasExecutionResult } from '@/executor/utils/errors'
const logger = createLogger('ExecuteFromBlockAPI')
const ExecuteFromBlockSchema = z.object({
startBlockId: z.string().min(1, 'Start block ID is required'),
sourceSnapshot: z.object({
blockStates: z.record(z.any()),
executedBlocks: z.array(z.string()),
blockLogs: z.array(z.any()),
decisions: z.object({
router: z.record(z.string()),
condition: z.record(z.string()),
}),
completedLoops: z.array(z.string()),
loopExecutions: z.record(z.any()).optional(),
parallelExecutions: z.record(z.any()).optional(),
parallelBlockMapping: z.record(z.any()).optional(),
activeExecutionPath: z.array(z.string()),
}),
})
export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'
export async function POST(req: NextRequest, { params }: { params: Promise<{ id: string }> }) {
const requestId = generateRequestId()
const { id: workflowId } = await params
try {
const auth = await checkHybridAuth(req, { requireWorkflowId: false })
if (!auth.success || !auth.userId) {
return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
}
const userId = auth.userId
let body: unknown
try {
body = await req.json()
} catch {
return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 })
}
const validation = ExecuteFromBlockSchema.safeParse(body)
if (!validation.success) {
logger.warn(`[${requestId}] Invalid request body:`, validation.error.errors)
return NextResponse.json(
{
error: 'Invalid request body',
details: validation.error.errors.map((e) => ({
path: e.path.join('.'),
message: e.message,
})),
},
{ status: 400 }
)
}
const { startBlockId, sourceSnapshot } = validation.data
const executionId = uuidv4()
const [workflowRecord] = await db
.select({ workspaceId: workflowTable.workspaceId, userId: workflowTable.userId })
.from(workflowTable)
.where(eq(workflowTable.id, workflowId))
.limit(1)
if (!workflowRecord?.workspaceId) {
return NextResponse.json({ error: 'Workflow not found or has no workspace' }, { status: 404 })
}
const workspaceId = workflowRecord.workspaceId
const workflowUserId = workflowRecord.userId
logger.info(`[${requestId}] Starting run-from-block execution`, {
workflowId,
startBlockId,
executedBlocksCount: sourceSnapshot.executedBlocks.length,
})
const loggingSession = new LoggingSession(workflowId, executionId, 'manual', requestId)
const abortController = new AbortController()
let isStreamClosed = false
const stream = new ReadableStream<Uint8Array>({
async start(controller) {
const sendEvent = (event: ExecutionEvent) => {
if (isStreamClosed) return
try {
controller.enqueue(encodeSSEEvent(event))
} catch {
isStreamClosed = true
}
}
const snapshot = new ExecutionSnapshot({
requestId,
workflowId,
userId,
executionId,
triggerType: 'manual',
workspaceId,
workflowUserId,
useDraftState: true,
isClientSession: true,
})
try {
const startTime = new Date()
sendEvent({
type: 'execution:started',
timestamp: startTime.toISOString(),
executionId,
workflowId,
data: { startTime: startTime.toISOString() },
})
const result = await executeWorkflowCore({
snapshot,
loggingSession,
abortSignal: abortController.signal,
runFromBlock: {
startBlockId,
sourceSnapshot: sourceSnapshot as SerializableExecutionState,
},
callbacks: {
onBlockStart: async (
blockId: string,
blockName: string,
blockType: string,
iterationContext?: IterationContext
) => {
sendEvent({
type: 'block:started',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: {
blockId,
blockName,
blockType,
...(iterationContext && {
iterationCurrent: iterationContext.iterationCurrent,
iterationTotal: iterationContext.iterationTotal,
iterationType: iterationContext.iterationType,
}),
},
})
},
onBlockComplete: async (
blockId: string,
blockName: string,
blockType: string,
callbackData: {
input?: unknown
output: NormalizedBlockOutput
executionTime: number
},
iterationContext?: IterationContext
) => {
const hasError = (callbackData.output as any)?.error
sendEvent({
type: hasError ? 'block:error' : 'block:completed',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: {
blockId,
blockName,
blockType,
input: callbackData.input,
...(hasError
? { error: (callbackData.output as any).error }
: { output: callbackData.output }),
durationMs: callbackData.executionTime || 0,
...(iterationContext && {
iterationCurrent: iterationContext.iterationCurrent,
iterationTotal: iterationContext.iterationTotal,
iterationType: iterationContext.iterationType,
}),
},
})
},
onStream: async (streamingExecution: unknown) => {
const streamingExec = streamingExecution as {
stream: ReadableStream
execution: any
}
const blockId = streamingExec.execution?.blockId
const reader = streamingExec.stream.getReader()
const decoder = new TextDecoder()
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
const chunk = decoder.decode(value, { stream: true })
sendEvent({
type: 'stream:chunk',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: { blockId, chunk },
})
}
sendEvent({
type: 'stream:done',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: { blockId },
})
} finally {
try {
reader.releaseLock()
} catch {}
}
},
},
})
if (result.status === 'cancelled') {
sendEvent({
type: 'execution:cancelled',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: { duration: result.metadata?.duration || 0 },
})
} else {
sendEvent({
type: 'execution:completed',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: {
success: result.success,
output: result.output,
duration: result.metadata?.duration || 0,
startTime: result.metadata?.startTime || startTime.toISOString(),
endTime: result.metadata?.endTime || new Date().toISOString(),
},
})
}
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
logger.error(`[${requestId}] Run-from-block execution failed: ${errorMessage}`)
const executionResult = hasExecutionResult(error) ? error.executionResult : undefined
sendEvent({
type: 'execution:error',
timestamp: new Date().toISOString(),
executionId,
workflowId,
data: {
error: executionResult?.error || errorMessage,
duration: executionResult?.metadata?.duration || 0,
},
})
} finally {
if (!isStreamClosed) {
try {
controller.enqueue(new TextEncoder().encode('data: [DONE]\n\n'))
controller.close()
} catch {}
}
}
},
cancel() {
isStreamClosed = true
abortController.abort()
markExecutionCancelled(executionId).catch(() => {})
},
})
return new NextResponse(stream, {
headers: { ...SSE_HEADERS, 'X-Execution-Id': executionId },
})
} catch (error: unknown) {
const errorMessage = error instanceof Error ? error.message : 'Unknown error'
logger.error(`[${requestId}] Failed to start run-from-block execution:`, error)
return NextResponse.json(
{ error: errorMessage || 'Failed to start execution' },
{ status: 500 }
)
}
}
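A hedged sketch of a client consuming this endpoint. The URL shape is inferred from the route's { id } param and the logger name; sourceSnapshot would come from a previous run (the execution store's getLastExecutionSnapshot, per the ActionBar change below):

```ts
// Hypothetical client for the run-from-block SSE endpoint above.
async function runFromBlock(workflowId: string, startBlockId: string, sourceSnapshot: unknown) {
  // Route path is assumed from the { id } param — adjust to the real app/ layout.
  const res = await fetch(`/api/workflows/${workflowId}/execute-from-block`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({ startBlockId, sourceSnapshot }),
  })
  const executionId = res.headers.get('X-Execution-Id')
  const reader = res.body!.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    const events = buffer.split('\n\n')
    buffer = events.pop()! // keep any partial event for the next chunk
    for (const raw of events) {
      const payload = raw.replace(/^data: /, '')
      if (payload === '[DONE]') return executionId
      const event = JSON.parse(payload) // execution:started, block:completed, ...
      console.log(event.type, event.data)
    }
  }
  return executionId
}
```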

View File

@@ -53,6 +53,7 @@ const ExecuteWorkflowSchema = z.object({
parallels: z.record(z.any()).optional(),
})
.optional(),
stopAfterBlockId: z.string().optional(),
})
export const runtime = 'nodejs'
@@ -222,6 +223,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
includeFileBase64,
base64MaxBytes,
workflowStateOverride,
stopAfterBlockId,
} = validation.data
// For API key and internal JWT auth, the entire body is the input (except for our control fields)
@@ -237,6 +239,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
includeFileBase64,
base64MaxBytes,
workflowStateOverride,
stopAfterBlockId: _stopAfterBlockId,
workflowId: _workflowId, // Also exclude workflowId used for internal JWT auth
...rest
} = body
@@ -434,6 +437,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
loggingSession,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
})
const outputWithBase64 = includeFileBase64
@@ -722,6 +726,7 @@ export async function POST(req: NextRequest, { params }: { params: Promise<{ id:
abortSignal: abortController.signal,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
})
if (result.status === 'paused') {
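A hedged example of the new "run until block" control field on the execute endpoint. The URL and auth header name are assumptions; stopAfterBlockId is stripped from the body before the rest is treated as workflow input:

```ts
// Hypothetical "run until block" invocation — endpoint path and header name assumed.
const workflowId = 'wf_123' // placeholder IDs for illustration
const targetBlockId = 'block_456'

const res = await fetch(`/api/workflows/${workflowId}/execute`, {
  method: 'POST',
  headers: {
    'Content-Type': 'application/json',
    'x-api-key': process.env.SIM_API_KEY!, // assumed API-key header
  },
  body: JSON.stringify({
    stopAfterBlockId: targetBlockId, // control field: halt after this block completes
    customerEmail: 'user@example.com', // everything else becomes workflow input
  }),
})
```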

View File

@@ -1,11 +1,13 @@
import { memo, useCallback } from 'react'
import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, LogOut } from 'lucide-react'
import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, LogOut, Play } from 'lucide-react'
import { Button, Copy, Tooltip, Trash2 } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { useWorkflowExecution } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks'
import { validateTriggerPaste } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useExecutionStore } from '@/stores/execution'
import { useNotificationStore } from '@/stores/notifications'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -49,6 +51,7 @@ export const ActionBar = memo(
collaborativeBatchToggleBlockHandles,
} = useCollaborativeWorkflow()
const { setPendingSelection } = useWorkflowRegistry()
const { handleRunFromBlock } = useWorkflowExecution()
const addNotification = useNotificationStore((s) => s.addNotification)
@@ -97,12 +100,30 @@ export const ActionBar = memo(
)
)
const { activeWorkflowId } = useWorkflowRegistry()
const { isExecuting, getLastExecutionSnapshot } = useExecutionStore()
const userPermissions = useUserPermissionsContext()
const edges = useWorkflowStore((state) => state.edges)
const isStartBlock = isInputDefinitionTrigger(blockType)
const isResponseBlock = blockType === 'response'
const isNoteBlock = blockType === 'note'
const isSubflowBlock = blockType === 'loop' || blockType === 'parallel'
const isInsideSubflow = parentId && (parentType === 'loop' || parentType === 'parallel')
const snapshot = activeWorkflowId ? getLastExecutionSnapshot(activeWorkflowId) : null
const incomingEdges = edges.filter((edge) => edge.target === blockId)
const isTriggerBlock = incomingEdges.length === 0
const dependenciesSatisfied =
isTriggerBlock ||
(snapshot && incomingEdges.every((edge) => snapshot.executedBlocks.includes(edge.source)))
const canRunFromBlock =
dependenciesSatisfied && !isNoteBlock && !isInsideSubflow && !isExecuting
const handleRunFromBlockClick = useCallback(() => {
if (!activeWorkflowId || !canRunFromBlock) return
handleRunFromBlock(blockId, activeWorkflowId)
}, [blockId, activeWorkflowId, canRunFromBlock, handleRunFromBlock])
/**
* Get appropriate tooltip message based on disabled state
@@ -135,23 +156,29 @@ export const ActionBar = memo(
variant='ghost'
onClick={(e) => {
e.stopPropagation()
if (!disabled) {
collaborativeBatchToggleBlockEnabled([blockId])
if (canRunFromBlock && !disabled) {
handleRunFromBlockClick()
}
}}
className={ACTION_BUTTON_STYLES}
disabled={disabled}
disabled={disabled || !canRunFromBlock}
>
{isEnabled ? <Circle className={ICON_SIZE} /> : <CircleOff className={ICON_SIZE} />}
<Play className={ICON_SIZE} />
</Button>
</Tooltip.Trigger>
<Tooltip.Content side='top'>
{getTooltipMessage(isEnabled ? 'Disable Block' : 'Enable Block')}
{(() => {
if (disabled) return getTooltipMessage('Run from this block')
if (isExecuting) return 'Execution in progress'
if (isInsideSubflow) return 'Cannot run from inside subflow'
if (!dependenciesSatisfied) return 'Run upstream blocks first'
return 'Run from this block'
})()}
</Tooltip.Content>
</Tooltip.Root>
)}
{isSubflowBlock && (
{!isNoteBlock && (
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Button

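The gating logic in the ActionBar reduces to one rule: a block can be run from directly if it is a trigger (no incoming edges), or if every upstream source already has a cached result in the last execution snapshot. A minimal standalone sketch of that check, with simplified Edge and Snapshot shapes standing in for the real store types:

// Sketch: run-from-block is allowed when all upstream outputs are cached.
interface Edge { source: string; target: string }
interface Snapshot { executedBlocks: string[] }

function canRunFrom(blockId: string, edges: Edge[], snapshot: Snapshot | null): boolean {
  const incoming = edges.filter((e) => e.target === blockId)
  if (incoming.length === 0) return true // trigger blocks need no cached upstream
  if (!snapshot) return false
  return incoming.every((e) => snapshot.executedBlocks.includes(e.source))
}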
View File

@@ -40,9 +40,16 @@ export interface BlockMenuProps {
onRemoveFromSubflow: () => void
onOpenEditor: () => void
onRename: () => void
onRunFromBlock?: () => void
onRunUntilBlock?: () => void
hasClipboard?: boolean
showRemoveFromSubflow?: boolean
/** Whether run from block is available (has snapshot, was executed, not inside subflow) */
canRunFromBlock?: boolean
/** Reason why run from block is disabled (for tooltip) */
runFromBlockDisabledReason?: string
disableEdit?: boolean
isExecuting?: boolean
}
/**
@@ -65,9 +72,14 @@ export function BlockMenu({
onRemoveFromSubflow,
onOpenEditor,
onRename,
onRunFromBlock,
onRunUntilBlock,
hasClipboard = false,
showRemoveFromSubflow = false,
canRunFromBlock = false,
runFromBlockDisabledReason,
disableEdit = false,
isExecuting = false,
}: BlockMenuProps) {
const isSingleBlock = selectedBlocks.length === 1
@@ -203,6 +215,39 @@ export function BlockMenu({
</PopoverItem>
)}
{/* Run from/until block - only for single non-note block selection */}
{isSingleBlock && !allNoteBlocks && (
<>
<PopoverDivider />
<PopoverItem
disabled={!canRunFromBlock || isExecuting}
onClick={() => {
if (canRunFromBlock && !isExecuting) {
onRunFromBlock?.()
onClose()
}
}}
>
{isExecuting
? 'Execution in progress...'
: !canRunFromBlock && runFromBlockDisabledReason
? runFromBlockDisabledReason
: 'Run from this block'}
</PopoverItem>
<PopoverItem
disabled={isExecuting}
onClick={() => {
if (!isExecuting) {
onRunUntilBlock?.()
onClose()
}
}}
>
{isExecuting ? 'Execution in progress...' : 'Run until this block'}
</PopoverItem>
</>
)}
{/* Destructive action */}
<PopoverDivider />
<PopoverItem

View File

@@ -338,6 +338,11 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
const configEqual =
prevProps.config.id === nextProps.config.id && prevProps.config.type === nextProps.config.type
const canonicalToggleEqual =
!!prevProps.canonicalToggle === !!nextProps.canonicalToggle &&
prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
return (
prevProps.blockId === nextProps.blockId &&
configEqual &&
@@ -346,8 +351,7 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
prevProps.disabled === nextProps.disabled &&
prevProps.fieldDiffStatus === nextProps.fieldDiffStatus &&
prevProps.allowExpandInPreview === nextProps.allowExpandInPreview &&
prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
canonicalToggleEqual
)
}

View File

@@ -15,7 +15,8 @@ import {
TriggerUtils,
} from '@/lib/workflows/triggers/triggers'
import { useCurrentWorkflow } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-current-workflow'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { BlockLog, BlockState, ExecutionResult, StreamingExecution } from '@/executor/types'
import { hasExecutionResult } from '@/executor/utils/errors'
import { coerceValue } from '@/executor/utils/start-block'
import { subscriptionKeys } from '@/hooks/queries/subscription'
@@ -98,6 +99,8 @@ export function useWorkflowExecution() {
setActiveBlocks,
setBlockRunStatus,
setEdgeRunStatus,
setLastExecutionSnapshot,
getLastExecutionSnapshot,
} = useExecutionStore()
const [executionResult, setExecutionResult] = useState<ExecutionResult | null>(null)
const executionStream = useExecutionStream()
@@ -668,7 +671,8 @@ export function useWorkflowExecution() {
onStream?: (se: StreamingExecution) => Promise<void>,
executionId?: string,
onBlockComplete?: (blockId: string, output: any) => Promise<void>,
overrideTriggerType?: 'chat' | 'manual' | 'api'
overrideTriggerType?: 'chat' | 'manual' | 'api',
stopAfterBlockId?: string
): Promise<ExecutionResult | StreamingExecution> => {
// Use diff workflow for execution when available, regardless of canvas view state
const executionWorkflowState = null as {
@@ -876,6 +880,8 @@ export function useWorkflowExecution() {
const activeBlocksSet = new Set<string>()
const streamedContent = new Map<string, string>()
const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>()
const executedBlockIds = new Set<string>()
// Execute the workflow
try {
@@ -887,6 +893,7 @@ export function useWorkflowExecution() {
triggerType: overrideTriggerType || 'manual',
useDraftState: true,
isClientSession: true,
stopAfterBlockId,
workflowStateOverride: executionWorkflowState
? {
blocks: executionWorkflowState.blocks,
@@ -916,18 +923,22 @@ export function useWorkflowExecution() {
logger.info('onBlockCompleted received:', { data })
activeBlocksSet.delete(data.blockId)
// Create a new Set to trigger React re-render
setActiveBlocks(new Set(activeBlocksSet))
// Track successful block execution in run path
setBlockRunStatus(data.blockId, 'success')
// Edges already tracked in onBlockStarted, no need to track again
executedBlockIds.add(data.blockId)
accumulatedBlockStates.set(data.blockId, {
output: data.output,
executed: true,
executionTime: data.durationMs,
})
const isContainerBlock = data.blockType === 'loop' || data.blockType === 'parallel'
if (isContainerBlock) return
const startedAt = new Date(Date.now() - data.durationMs).toISOString()
const endedAt = new Date().toISOString()
// Accumulate block log for the execution result
accumulatedBlockLogs.push({
blockId: data.blockId,
blockName: data.blockName || 'Unknown Block',
@@ -1056,6 +1067,48 @@ export function useWorkflowExecution() {
},
logs: accumulatedBlockLogs,
}
if (data.success && activeWorkflowId) {
if (stopAfterBlockId) {
const existingSnapshot = getLastExecutionSnapshot(activeWorkflowId)
const mergedBlockStates = {
...(existingSnapshot?.blockStates || {}),
...Object.fromEntries(accumulatedBlockStates),
}
const mergedExecutedBlocks = new Set([
...(existingSnapshot?.executedBlocks || []),
...executedBlockIds,
])
const snapshot: SerializableExecutionState = {
blockStates: mergedBlockStates,
executedBlocks: Array.from(mergedExecutedBlocks),
blockLogs: [...(existingSnapshot?.blockLogs || []), ...accumulatedBlockLogs],
decisions: existingSnapshot?.decisions || { router: {}, condition: {} },
completedLoops: existingSnapshot?.completedLoops || [],
activeExecutionPath: Array.from(mergedExecutedBlocks),
}
setLastExecutionSnapshot(activeWorkflowId, snapshot)
logger.info('Merged execution snapshot after run-until-block', {
workflowId: activeWorkflowId,
newBlocksExecuted: executedBlockIds.size,
totalExecutedBlocks: mergedExecutedBlocks.size,
})
} else {
const snapshot: SerializableExecutionState = {
blockStates: Object.fromEntries(accumulatedBlockStates),
executedBlocks: Array.from(executedBlockIds),
blockLogs: accumulatedBlockLogs,
decisions: { router: {}, condition: {} },
completedLoops: [],
activeExecutionPath: Array.from(executedBlockIds),
}
setLastExecutionSnapshot(activeWorkflowId, snapshot)
logger.info('Stored execution snapshot for run-from-block', {
workflowId: activeWorkflowId,
executedBlocksCount: executedBlockIds.size,
})
}
}
},
onExecutionError: (data) => {
@@ -1376,6 +1429,265 @@ export function useWorkflowExecution() {
setActiveBlocks,
])
/**
 * Handles running the workflow from a specific block, reusing cached upstream outputs
*/
const handleRunFromBlock = useCallback(
async (blockId: string, workflowId: string) => {
const snapshot = getLastExecutionSnapshot(workflowId)
const workflowEdges = useWorkflowStore.getState().edges
const incomingEdges = workflowEdges.filter((edge) => edge.target === blockId)
const isTriggerBlock = incomingEdges.length === 0
if (!snapshot && !isTriggerBlock) {
logger.error('No execution snapshot available for run-from-block', { workflowId, blockId })
return
}
const dependenciesSatisfied =
isTriggerBlock ||
(snapshot && incomingEdges.every((edge) => snapshot.executedBlocks.includes(edge.source)))
if (!dependenciesSatisfied) {
logger.error('Upstream dependencies not satisfied for run-from-block', {
workflowId,
blockId,
})
return
}
// For trigger blocks with no snapshot, create an empty one
const effectiveSnapshot: SerializableExecutionState = snapshot || {
blockStates: {},
executedBlocks: [],
blockLogs: [],
decisions: { router: {}, condition: {} },
completedLoops: [],
activeExecutionPath: [],
}
logger.info('Starting run-from-block execution', {
workflowId,
startBlockId: blockId,
isTriggerBlock,
})
setIsExecuting(true)
const executionId = uuidv4()
const accumulatedBlockLogs: BlockLog[] = []
const accumulatedBlockStates = new Map<string, BlockState>()
const executedBlockIds = new Set<string>()
const activeBlocksSet = new Set<string>()
try {
await executionStream.executeFromBlock({
workflowId,
startBlockId: blockId,
sourceSnapshot: effectiveSnapshot,
callbacks: {
onExecutionStarted: (data) => {
logger.info('Run-from-block execution started:', data)
},
onBlockStarted: (data) => {
activeBlocksSet.add(data.blockId)
setActiveBlocks(new Set(activeBlocksSet))
const incomingEdges = workflowEdges.filter((edge) => edge.target === data.blockId)
incomingEdges.forEach((edge) => {
setEdgeRunStatus(edge.id, 'success')
})
},
onBlockCompleted: (data) => {
activeBlocksSet.delete(data.blockId)
setActiveBlocks(new Set(activeBlocksSet))
setBlockRunStatus(data.blockId, 'success')
executedBlockIds.add(data.blockId)
accumulatedBlockStates.set(data.blockId, {
output: data.output,
executed: true,
executionTime: data.durationMs,
})
const isContainerBlock = data.blockType === 'loop' || data.blockType === 'parallel'
if (isContainerBlock) return
const startedAt = new Date(Date.now() - data.durationMs).toISOString()
const endedAt = new Date().toISOString()
accumulatedBlockLogs.push({
blockId: data.blockId,
blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown',
input: data.input || {},
output: data.output,
success: true,
durationMs: data.durationMs,
startedAt,
endedAt,
})
addConsole({
input: data.input || {},
output: data.output,
success: true,
durationMs: data.durationMs,
startedAt,
endedAt,
workflowId,
blockId: data.blockId,
executionId,
blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown',
iterationCurrent: data.iterationCurrent,
iterationTotal: data.iterationTotal,
iterationType: data.iterationType,
})
},
onBlockError: (data) => {
activeBlocksSet.delete(data.blockId)
setActiveBlocks(new Set(activeBlocksSet))
setBlockRunStatus(data.blockId, 'error')
const startedAt = new Date(Date.now() - data.durationMs).toISOString()
const endedAt = new Date().toISOString()
accumulatedBlockLogs.push({
blockId: data.blockId,
blockName: data.blockName || 'Unknown Block',
blockType: data.blockType || 'unknown',
input: data.input || {},
output: {},
success: false,
error: data.error,
durationMs: data.durationMs,
startedAt,
endedAt,
})
addConsole({
input: data.input || {},
output: {},
success: false,
error: data.error,
durationMs: data.durationMs,
startedAt,
endedAt,
workflowId,
blockId: data.blockId,
executionId,
blockName: data.blockName,
blockType: data.blockType,
iterationCurrent: data.iterationCurrent,
iterationTotal: data.iterationTotal,
iterationType: data.iterationType,
})
},
onExecutionCompleted: (data) => {
if (data.success) {
const mergedBlockStates: Record<string, BlockState> = {
...effectiveSnapshot.blockStates,
}
for (const [bId, state] of accumulatedBlockStates) {
mergedBlockStates[bId] = state
}
const mergedExecutedBlocks = new Set([
...effectiveSnapshot.executedBlocks,
...executedBlockIds,
])
const updatedSnapshot: SerializableExecutionState = {
...effectiveSnapshot,
blockStates: mergedBlockStates,
executedBlocks: Array.from(mergedExecutedBlocks),
blockLogs: [...effectiveSnapshot.blockLogs, ...accumulatedBlockLogs],
activeExecutionPath: Array.from(mergedExecutedBlocks),
}
setLastExecutionSnapshot(workflowId, updatedSnapshot)
logger.info('Updated execution snapshot after run-from-block', {
workflowId,
newBlocksExecuted: executedBlockIds.size,
})
}
},
onExecutionError: (data) => {
logger.error('Run-from-block execution error:', data.error)
},
onExecutionCancelled: () => {
logger.info('Run-from-block execution cancelled')
},
},
})
} catch (error) {
if ((error as Error).name !== 'AbortError') {
logger.error('Run-from-block execution failed:', error)
}
} finally {
setIsExecuting(false)
setActiveBlocks(new Set())
}
},
[
getLastExecutionSnapshot,
setLastExecutionSnapshot,
setIsExecuting,
setActiveBlocks,
setBlockRunStatus,
setEdgeRunStatus,
addConsole,
executionStream,
]
)
/**
 * Handles running the workflow until a specific block (execution stops after that block completes)
*/
const handleRunUntilBlock = useCallback(
async (blockId: string, workflowId: string) => {
if (!workflowId || workflowId !== activeWorkflowId) {
logger.error('Invalid workflow ID for run-until-block', { workflowId, activeWorkflowId })
return
}
logger.info('Starting run-until-block execution', { workflowId, stopAfterBlockId: blockId })
setExecutionResult(null)
setIsExecuting(true)
const executionId = uuidv4()
try {
const result = await executeWorkflow(
undefined,
undefined,
executionId,
undefined,
'manual',
blockId
)
if (result && 'success' in result) {
setExecutionResult(result)
}
} catch (error) {
const errorResult = handleExecutionError(error, { executionId })
return errorResult
} finally {
setIsExecuting(false)
setIsDebugging(false)
setActiveBlocks(new Set())
}
},
[activeWorkflowId, setExecutionResult, setIsExecuting, setIsDebugging, setActiveBlocks]
)
return {
isExecuting,
isDebugging,
@@ -1386,5 +1698,7 @@ export function useWorkflowExecution() {
handleResumeDebug,
handleCancelDebug,
handleCancelExecution,
handleRunFromBlock,
handleRunUntilBlock,
}
}
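The snapshot bookkeeping in this hook follows one merge rule: a run-until-block merges its results into the previous snapshot, a full run replaces it, and run-from-block merges as well. A hedged sketch of that merge, using the SerializableExecutionState, BlockState, and BlockLog types already imported above (the helper name is illustrative):

// Sketch: union executed blocks and block states; append logs.
function mergeSnapshot(
  existing: SerializableExecutionState | null,
  states: Map<string, BlockState>,
  executed: Set<string>,
  logs: BlockLog[]
): SerializableExecutionState {
  const executedBlocks = new Set([...(existing?.executedBlocks ?? []), ...executed])
  return {
    blockStates: { ...(existing?.blockStates ?? {}), ...Object.fromEntries(states) },
    executedBlocks: Array.from(executedBlocks),
    blockLogs: [...(existing?.blockLogs ?? []), ...logs],
    decisions: existing?.decisions ?? { router: {}, condition: {} },
    completedLoops: existing?.completedLoops ?? [],
    activeExecutionPath: Array.from(executedBlocks),
  }
}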

View File

@@ -47,6 +47,7 @@ import {
useCurrentWorkflow,
useNodeUtilities,
useShiftSelectionLock,
useWorkflowExecution,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks'
import {
calculateContainerDimensions,
@@ -325,6 +326,8 @@ const WorkflowContent = React.memo(() => {
const showTrainingModal = useCopilotTrainingStore((state) => state.showModal)
const { handleRunFromBlock, handleRunUntilBlock } = useWorkflowExecution()
const snapToGridSize = useSnapToGridSize()
const snapToGrid = snapToGridSize > 0
@@ -758,13 +761,16 @@ const WorkflowContent = React.memo(() => {
[collaborativeBatchAddBlocks, setSelectedEdges, setPendingSelection]
)
const { activeBlockIds, pendingBlocks, isDebugging } = useExecutionStore(
useShallow((state) => ({
activeBlockIds: state.activeBlockIds,
pendingBlocks: state.pendingBlocks,
isDebugging: state.isDebugging,
}))
)
const { activeBlockIds, pendingBlocks, isDebugging, isExecuting, getLastExecutionSnapshot } =
useExecutionStore(
useShallow((state) => ({
activeBlockIds: state.activeBlockIds,
pendingBlocks: state.pendingBlocks,
isDebugging: state.isDebugging,
isExecuting: state.isExecuting,
getLastExecutionSnapshot: state.getLastExecutionSnapshot,
}))
)
const [dragStartParentId, setDragStartParentId] = useState<string | null>(null)
@@ -988,6 +994,41 @@ const WorkflowContent = React.memo(() => {
}
}, [contextMenuBlocks])
const handleContextRunFromBlock = useCallback(() => {
if (contextMenuBlocks.length !== 1) return
const blockId = contextMenuBlocks[0].id
handleRunFromBlock(blockId, workflowIdParam)
}, [contextMenuBlocks, workflowIdParam, handleRunFromBlock])
const handleContextRunUntilBlock = useCallback(() => {
if (contextMenuBlocks.length !== 1) return
const blockId = contextMenuBlocks[0].id
handleRunUntilBlock(blockId, workflowIdParam)
}, [contextMenuBlocks, workflowIdParam, handleRunUntilBlock])
const runFromBlockState = useMemo(() => {
if (contextMenuBlocks.length !== 1) {
return { canRun: false, reason: undefined }
}
const block = contextMenuBlocks[0]
const snapshot = getLastExecutionSnapshot(workflowIdParam)
const incomingEdges = edges.filter((edge) => edge.target === block.id)
const isTriggerBlock = incomingEdges.length === 0
const dependenciesSatisfied =
isTriggerBlock ||
(snapshot && incomingEdges.every((edge) => snapshot.executedBlocks.includes(edge.source)))
const isNoteBlock = block.type === 'note'
const isInsideSubflow =
block.parentId && (block.parentType === 'loop' || block.parentType === 'parallel')
if (isInsideSubflow) return { canRun: false, reason: 'Cannot run from inside subflow' }
if (!dependenciesSatisfied) return { canRun: false, reason: 'Run upstream blocks first' }
if (isNoteBlock) return { canRun: false, reason: undefined }
if (isExecuting) return { canRun: false, reason: undefined }
return { canRun: true, reason: undefined }
}, [contextMenuBlocks, edges, workflowIdParam, getLastExecutionSnapshot, isExecuting])
const handleContextAddBlock = useCallback(() => {
useSearchModalStore.getState().open()
}, [])
@@ -1641,51 +1682,36 @@ const WorkflowContent = React.memo(() => {
}, [screenToFlowPosition, handleToolbarDrop])
/**
* Focus canvas on changed blocks when diff appears
* Focuses on new/edited blocks rather than fitting the entire workflow
* Focus canvas on changed blocks when diff appears.
*/
const pendingZoomBlockIdsRef = useRef<Set<string> | null>(null)
const prevDiffReadyRef = useRef(false)
// Phase 1: When diff becomes ready, record which blocks we want to zoom to
// Phase 2 effect is located after displayNodes is defined (search for "Phase 2")
useEffect(() => {
// Only focus when diff transitions from not ready to ready
if (isDiffReady && !prevDiffReadyRef.current && diffAnalysis) {
// Diff just became ready - record blocks to zoom to
const changedBlockIds = [
...(diffAnalysis.new_blocks || []),
...(diffAnalysis.edited_blocks || []),
]
if (changedBlockIds.length > 0) {
const allNodes = getNodes()
const changedNodes = allNodes.filter((node) => changedBlockIds.includes(node.id))
if (changedNodes.length > 0) {
logger.info('Diff ready - focusing on changed blocks', {
changedBlockIds,
foundNodes: changedNodes.length,
})
requestAnimationFrame(() => {
fitViewToBounds({
nodes: changedNodes,
duration: 600,
padding: 0.1,
minZoom: 0.5,
maxZoom: 1.0,
})
})
} else {
logger.info('Diff ready - no changed nodes found, fitting all')
requestAnimationFrame(() => {
fitViewToBounds({ padding: 0.1, duration: 600 })
})
}
pendingZoomBlockIdsRef.current = new Set(changedBlockIds)
} else {
logger.info('Diff ready - no changed blocks, fitting all')
// No specific blocks to focus on, fit all after a frame
pendingZoomBlockIdsRef.current = null
requestAnimationFrame(() => {
fitViewToBounds({ padding: 0.1, duration: 600 })
})
}
} else if (!isDiffReady && prevDiffReadyRef.current) {
// Diff was cleared (accepted/rejected) - cancel any pending zoom
pendingZoomBlockIdsRef.current = null
}
prevDiffReadyRef.current = isDiffReady
}, [isDiffReady, diffAnalysis, fitViewToBounds, getNodes])
}, [isDiffReady, diffAnalysis, fitViewToBounds])
/** Displays trigger warning notifications. */
useEffect(() => {
@@ -2093,6 +2119,48 @@ const WorkflowContent = React.memo(() => {
})
}, [derivedNodes, blocks, pendingSelection, clearPendingSelection])
// Phase 2: When displayNodes updates, check if pending zoom blocks are ready
// (Phase 1 is located earlier in the file where pendingZoomBlockIdsRef is defined)
useEffect(() => {
const pendingBlockIds = pendingZoomBlockIdsRef.current
if (!pendingBlockIds || pendingBlockIds.size === 0) {
return
}
// Find the nodes we're waiting for
const pendingNodes = displayNodes.filter((node) => pendingBlockIds.has(node.id))
// Check if all expected nodes are present with valid dimensions
const allNodesReady =
pendingNodes.length === pendingBlockIds.size &&
pendingNodes.every(
(node) =>
typeof node.width === 'number' &&
typeof node.height === 'number' &&
node.width > 0 &&
node.height > 0
)
if (allNodesReady) {
logger.info('Diff ready - focusing on changed blocks', {
changedBlockIds: Array.from(pendingBlockIds),
foundNodes: pendingNodes.length,
})
// Clear pending state before zooming to prevent re-triggers
pendingZoomBlockIdsRef.current = null
// Use requestAnimationFrame to ensure React has finished rendering
requestAnimationFrame(() => {
fitViewToBounds({
nodes: pendingNodes,
duration: 600,
padding: 0.1,
minZoom: 0.5,
maxZoom: 1.0,
})
})
}
}, [displayNodes, fitViewToBounds])
/** Handles ActionBar remove-from-subflow events. */
useEffect(() => {
const handleRemoveFromSubflow = (event: Event) => {
@@ -3281,11 +3349,16 @@ const WorkflowContent = React.memo(() => {
onRemoveFromSubflow={handleContextRemoveFromSubflow}
onOpenEditor={handleContextOpenEditor}
onRename={handleContextRename}
onRunFromBlock={handleContextRunFromBlock}
onRunUntilBlock={handleContextRunUntilBlock}
hasClipboard={hasClipboard()}
showRemoveFromSubflow={contextMenuBlocks.some(
(b) => b.parentId && (b.parentType === 'loop' || b.parentType === 'parallel')
)}
canRunFromBlock={runFromBlockState.canRun}
runFromBlockDisabledReason={runFromBlockState.reason}
disableEdit={!effectivePermissions.canEdit}
isExecuting={isExecuting}
/>
<CanvasMenu

View File

@@ -214,15 +214,6 @@ export const A2ABlock: BlockConfig<A2AResponse> = {
],
config: {
tool: (params) => params.operation as string,
params: (params) => {
const { fileUpload, fileReference, ...rest } = params
const hasFileUpload = Array.isArray(fileUpload) ? fileUpload.length > 0 : !!fileUpload
const files = hasFileUpload ? fileUpload : fileReference
return {
...rest,
...(files ? { files } : {}),
}
},
},
},
inputs: {

View File

@@ -581,6 +581,18 @@ export const GmailV2Block: BlockConfig<GmailToolResponse> = {
results: { type: 'json', description: 'Search/read summary results' },
attachments: { type: 'json', description: 'Downloaded attachments (if enabled)' },
// Draft-specific outputs
draftId: {
type: 'string',
description: 'Draft ID',
condition: { field: 'operation', value: 'draft_gmail' },
},
messageId: {
type: 'string',
description: 'Gmail message ID for the draft',
condition: { field: 'operation', value: 'draft_gmail' },
},
// Trigger outputs (unchanged)
email_id: { type: 'string', description: 'Gmail message ID' },
thread_id: { type: 'string', description: 'Gmail thread ID' },

View File

@@ -661,12 +661,25 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e
placeholder: 'folder/subfolder/',
condition: { field: 'operation', value: 'storage_upload' },
},
{
id: 'file',
title: 'File',
type: 'file-upload',
canonicalParamId: 'fileData',
placeholder: 'Upload file to storage',
condition: { field: 'operation', value: 'storage_upload' },
mode: 'basic',
multiple: false,
required: true,
},
{
id: 'fileContent',
title: 'File Content',
type: 'code',
canonicalParamId: 'fileData',
placeholder: 'Base64 encoded for binary files, or plain text',
condition: { field: 'operation', value: 'storage_upload' },
mode: 'advanced',
required: true,
},
{

View File

@@ -26,6 +26,7 @@ export class ExecutionEngine {
private allowResumeTriggers: boolean
private cancelledFlag = false
private errorFlag = false
private stoppedEarlyFlag = false
private executionError: Error | null = null
private lastCancellationCheck = 0
private readonly useRedisCancellation: boolean
@@ -105,7 +106,7 @@ export class ExecutionEngine {
this.initializeQueue(triggerBlockId)
while (this.hasWork()) {
if ((await this.checkCancellation()) || this.errorFlag) {
if ((await this.checkCancellation()) || this.errorFlag || this.stoppedEarlyFlag) {
break
}
await this.processQueue()
@@ -259,6 +260,16 @@ export class ExecutionEngine {
}
private initializeQueue(triggerBlockId?: string): void {
if (this.context.runFromBlockContext) {
const { startBlockId } = this.context.runFromBlockContext
logger.info('Initializing queue for run-from-block mode', {
startBlockId,
dirtySetSize: this.context.runFromBlockContext.dirtySet.size,
})
this.addToQueue(startBlockId)
return
}
const pendingBlocks = this.context.metadata.pendingBlocks
const remainingEdges = (this.context.metadata as any).remainingEdges
@@ -385,6 +396,12 @@ export class ExecutionEngine {
this.finalOutput = output
}
if (this.context.stopAfterBlockId === nodeId) {
logger.info('Stopping execution after target block', { nodeId })
this.stoppedEarlyFlag = true
return
}
const readyNodes = this.edgeManager.processOutgoingEdges(node, output, false)
logger.info('Processing outgoing edges', {

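The engine change amounts to one new exit path: when the node that just completed matches stopAfterBlockId, the engine sets a flag and skips edge processing, and the main loop then drains. A standalone sketch of that control flow, with names simplified from the diff:

// Sketch: the run loop exits on cancellation, error, or early stop.
interface EngineFlags { cancelled: boolean; errored: boolean; stoppedEarly: boolean }

function shouldExit(flags: EngineFlags): boolean {
  return flags.cancelled || flags.errored || flags.stoppedEarly
}

// Called after a node completes; returns true when outgoing edges must be skipped.
function markEarlyStop(flags: EngineFlags, nodeId: string, stopAfterBlockId?: string): boolean {
  if (stopAfterBlockId === nodeId) {
    flags.stoppedEarly = true
    return true
  }
  return false
}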
View File

@@ -5,12 +5,22 @@ import { BlockExecutor } from '@/executor/execution/block-executor'
import { EdgeManager } from '@/executor/execution/edge-manager'
import { ExecutionEngine } from '@/executor/execution/engine'
import { ExecutionState } from '@/executor/execution/state'
import type { ContextExtensions, WorkflowInput } from '@/executor/execution/types'
import type {
ContextExtensions,
SerializableExecutionState,
WorkflowInput,
} from '@/executor/execution/types'
import { createBlockHandlers } from '@/executor/handlers/registry'
import { LoopOrchestrator } from '@/executor/orchestrators/loop'
import { NodeExecutionOrchestrator } from '@/executor/orchestrators/node'
import { ParallelOrchestrator } from '@/executor/orchestrators/parallel'
import type { BlockState, ExecutionContext, ExecutionResult } from '@/executor/types'
import {
computeDirtySet,
type RunFromBlockContext,
resolveContainerToSentinelStart,
validateRunFromBlock,
} from '@/executor/utils/run-from-block'
import {
buildResolutionFromBlock,
buildStartBlockOutput,
@@ -89,17 +99,108 @@ export class DAGExecutor {
}
}
/**
* Execute from a specific block using cached outputs for upstream blocks.
*/
async executeFromBlock(
workflowId: string,
startBlockId: string,
sourceSnapshot: SerializableExecutionState
): Promise<ExecutionResult> {
const dag = this.dagBuilder.build(this.workflow)
const executedBlocks = new Set(sourceSnapshot.executedBlocks)
const validation = validateRunFromBlock(startBlockId, dag, executedBlocks)
if (!validation.valid) {
throw new Error(validation.error)
}
const dirtySet = computeDirtySet(dag, startBlockId)
const effectiveStartBlockId = resolveContainerToSentinelStart(startBlockId, dag) ?? startBlockId
logger.info('Executing from block', {
workflowId,
startBlockId,
effectiveStartBlockId,
dirtySetSize: dirtySet.size,
totalBlocks: dag.nodes.size,
dirtyBlocks: Array.from(dirtySet),
})
// Remove incoming edges from non-dirty sources so convergent blocks don't wait for cached upstream
for (const nodeId of dirtySet) {
const node = dag.nodes.get(nodeId)
if (!node) continue
const nonDirtyIncoming: string[] = []
for (const sourceId of node.incomingEdges) {
if (!dirtySet.has(sourceId)) {
nonDirtyIncoming.push(sourceId)
}
}
for (const sourceId of nonDirtyIncoming) {
node.incomingEdges.delete(sourceId)
logger.debug('Removed non-dirty incoming edge for run-from-block', {
nodeId,
sourceId,
})
}
}
const runFromBlockContext = { startBlockId: effectiveStartBlockId, dirtySet }
const { context, state } = this.createExecutionContext(workflowId, undefined, {
snapshotState: sourceSnapshot,
runFromBlockContext,
})
const resolver = new VariableResolver(this.workflow, this.workflowVariables, state)
const loopOrchestrator = new LoopOrchestrator(dag, state, resolver)
loopOrchestrator.setContextExtensions(this.contextExtensions)
const parallelOrchestrator = new ParallelOrchestrator(dag, state)
parallelOrchestrator.setResolver(resolver)
parallelOrchestrator.setContextExtensions(this.contextExtensions)
const allHandlers = createBlockHandlers()
const blockExecutor = new BlockExecutor(allHandlers, resolver, this.contextExtensions, state)
const edgeManager = new EdgeManager(dag)
loopOrchestrator.setEdgeManager(edgeManager)
const nodeOrchestrator = new NodeExecutionOrchestrator(
dag,
state,
blockExecutor,
loopOrchestrator,
parallelOrchestrator
)
const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)
return await engine.run()
}
private createExecutionContext(
workflowId: string,
triggerBlockId?: string
triggerBlockId?: string,
overrides?: {
snapshotState?: SerializableExecutionState
runFromBlockContext?: RunFromBlockContext
}
): { context: ExecutionContext; state: ExecutionState } {
const snapshotState = this.contextExtensions.snapshotState
const snapshotState = overrides?.snapshotState ?? this.contextExtensions.snapshotState
const blockStates = snapshotState?.blockStates
? new Map(Object.entries(snapshotState.blockStates))
: new Map<string, BlockState>()
const executedBlocks = snapshotState?.executedBlocks
let executedBlocks = snapshotState?.executedBlocks
? new Set(snapshotState.executedBlocks)
: new Set<string>()
if (overrides?.runFromBlockContext) {
const { dirtySet } = overrides.runFromBlockContext
executedBlocks = new Set([...executedBlocks].filter((id) => !dirtySet.has(id)))
logger.info('Cleared executed status for dirty blocks', {
dirtySetSize: dirtySet.size,
remainingExecutedBlocks: executedBlocks.size,
})
}
const state = new ExecutionState(blockStates, executedBlocks)
const context: ExecutionContext = {
@@ -109,7 +210,7 @@ export class DAGExecutor {
userId: this.contextExtensions.userId,
isDeployedContext: this.contextExtensions.isDeployedContext,
blockStates: state.getBlockStates(),
blockLogs: snapshotState?.blockLogs ?? [],
blockLogs: overrides?.runFromBlockContext ? [] : (snapshotState?.blockLogs ?? []),
metadata: {
...this.contextExtensions.metadata,
startTime: new Date().toISOString(),
@@ -169,6 +270,8 @@ export class DAGExecutor {
abortSignal: this.contextExtensions.abortSignal,
includeFileBase64: this.contextExtensions.includeFileBase64,
base64MaxBytes: this.contextExtensions.base64MaxBytes,
runFromBlockContext: overrides?.runFromBlockContext,
stopAfterBlockId: this.contextExtensions.stopAfterBlockId,
}
if (this.contextExtensions.resumeFromSnapshot) {
@@ -193,6 +296,10 @@ export class DAGExecutor {
pendingBlocks: context.metadata.pendingBlocks,
skipStarterBlockInit: true,
})
} else if (overrides?.runFromBlockContext) {
logger.info('Run-from-block mode: skipping starter block initialization', {
startBlockId: overrides.runFromBlockContext.startBlockId,
})
} else {
this.initializeStarterBlock(context, state, triggerBlockId)
}
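End to end, a caller drives partial execution roughly as follows; this is a hedged sketch (executor wiring elided, the runFromBlock helper name is hypothetical), but the call shape matches executeFromBlock above:

// Sketch: resume from a block using the snapshot of a previous run.
declare const dagExecutor: DAGExecutor
declare function getLastExecutionSnapshot(workflowId: string): SerializableExecutionState | null

async function runFromBlock(workflowId: string, blockId: string) {
  const snapshot = getLastExecutionSnapshot(workflowId)
  if (!snapshot) throw new Error('No prior execution to resume from')
  // Non-dirty blocks return cached outputs; dirty blocks re-execute.
  return dagExecutor.executeFromBlock(workflowId, blockId, snapshot)
}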

View File

@@ -1,5 +1,6 @@
import type { Edge } from 'reactflow'
import type { BlockLog, BlockState, NormalizedBlockOutput } from '@/executor/types'
import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
import type { SubflowType } from '@/stores/workflows/workflow/types'
export interface ExecutionMetadata {
@@ -105,6 +106,17 @@ export interface ContextExtensions {
output: { input?: any; output: NormalizedBlockOutput; executionTime: number },
iterationContext?: IterationContext
) => Promise<void>
/**
* Run-from-block configuration. When provided, executor runs in partial
* execution mode starting from the specified block.
*/
runFromBlockContext?: RunFromBlockContext
/**
* Stop execution after this block completes. Used for "run until block" feature.
*/
stopAfterBlockId?: string
}
export interface WorkflowInput {

View File

@@ -276,7 +276,16 @@ export class LoopOrchestrator {
scope: LoopScope
): LoopContinuationResult {
const results = scope.allIterationOutputs
this.state.setBlockOutput(loopId, { results }, DEFAULTS.EXECUTION_TIME)
const output = { results }
this.state.setBlockOutput(loopId, output, DEFAULTS.EXECUTION_TIME)
// Emit onBlockComplete for the loop container so the UI can track it
if (this.contextExtensions?.onBlockComplete) {
this.contextExtensions.onBlockComplete(loopId, 'Loop', 'loop', {
output,
executionTime: DEFAULTS.EXECUTION_TIME,
})
}
return {
shouldContinue: false,

View File

@@ -31,7 +31,18 @@ export class NodeExecutionOrchestrator {
throw new Error(`Node not found in DAG: ${nodeId}`)
}
if (this.state.hasExecuted(nodeId)) {
if (ctx.runFromBlockContext && !ctx.runFromBlockContext.dirtySet.has(nodeId)) {
const cachedOutput = this.state.getBlockOutput(nodeId) || {}
logger.debug('Skipping non-dirty block in run-from-block mode', { nodeId })
return {
nodeId,
output: cachedOutput,
isFinalOutput: false,
}
}
const isDirtyBlock = ctx.runFromBlockContext?.dirtySet.has(nodeId) ?? false
if (!isDirtyBlock && this.state.hasExecuted(nodeId)) {
const output = this.state.getBlockOutput(nodeId) || {}
return {
nodeId,

View File

@@ -228,9 +228,17 @@ export class ParallelOrchestrator {
const branchOutputs = scope.branchOutputs.get(i) || []
results.push(branchOutputs)
}
this.state.setBlockOutput(parallelId, {
results,
})
const output = { results }
this.state.setBlockOutput(parallelId, output)
// Emit onBlockComplete for the parallel container so the UI can track it
if (this.contextExtensions?.onBlockComplete) {
this.contextExtensions.onBlockComplete(parallelId, 'Parallel', 'parallel', {
output,
executionTime: 0,
})
}
return {
allBranchesComplete: true,
results,

View File

@@ -1,6 +1,7 @@
import type { TraceSpan } from '@/lib/logs/types'
import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
import type { BlockOutput } from '@/blocks/types'
import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
export interface UserFile {
@@ -250,6 +251,17 @@ export interface ExecutionContext {
* will not have their base64 content fetched.
*/
base64MaxBytes?: number
/**
* Context for "run from block" mode. When present, only blocks in dirtySet
* will be executed; others return cached outputs from the source snapshot.
*/
runFromBlockContext?: RunFromBlockContext
/**
* Stop execution after this block completes. Used for "run until block" feature.
*/
stopAfterBlockId?: string
}
export interface ExecutionResult {

View File

@@ -1,7 +1,9 @@
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import { normalizeName } from '@/executor/constants'
import type { ExecutionContext } from '@/executor/types'
import type { OutputSchema } from '@/executor/utils/block-reference'
import type { SerializedBlock } from '@/serializer/types'
import type { ToolConfig } from '@/tools/types'
import { getTool } from '@/tools/utils'
export interface BlockDataCollection {
blockData: Record<string, unknown>
@@ -9,6 +11,32 @@ export interface BlockDataCollection {
blockOutputSchemas: Record<string, OutputSchema>
}
export function getBlockSchema(
block: SerializedBlock,
toolConfig?: ToolConfig
): OutputSchema | undefined {
const isTrigger =
block.metadata?.category === 'triggers' ||
(block.config?.params as Record<string, unknown> | undefined)?.triggerMode === true
// Triggers use saved outputs (these define the trigger payload schema)
if (isTrigger && block.outputs && Object.keys(block.outputs).length > 0) {
return block.outputs as OutputSchema
}
// When a tool is selected, tool outputs are the source of truth
if (toolConfig?.outputs && Object.keys(toolConfig.outputs).length > 0) {
return toolConfig.outputs as OutputSchema
}
// Fallback to saved outputs for blocks without tools
if (block.outputs && Object.keys(block.outputs).length > 0) {
return block.outputs as OutputSchema
}
return undefined
}
export function collectBlockData(ctx: ExecutionContext): BlockDataCollection {
const blockData: Record<string, unknown> = {}
const blockNameMapping: Record<string, string> = {}
@@ -18,24 +46,21 @@ export function collectBlockData(ctx: ExecutionContext): BlockDataCollection {
if (state.output !== undefined) {
blockData[id] = state.output
}
}
const workflowBlock = ctx.workflow?.blocks?.find((b) => b.id === id)
if (!workflowBlock) continue
const workflowBlocks = ctx.workflow?.blocks ?? []
for (const block of workflowBlocks) {
const id = block.id
if (workflowBlock.metadata?.name) {
blockNameMapping[normalizeName(workflowBlock.metadata.name)] = id
if (block.metadata?.name) {
blockNameMapping[normalizeName(block.metadata.name)] = id
}
const blockType = workflowBlock.metadata?.id
if (blockType) {
const params = workflowBlock.config?.params as Record<string, unknown> | undefined
const subBlocks = params
? Object.fromEntries(Object.entries(params).map(([k, v]) => [k, { value: v }]))
: undefined
const schema = getBlockOutputs(blockType, subBlocks)
if (schema && Object.keys(schema).length > 0) {
blockOutputSchemas[id] = schema
}
const toolId = block.config?.tool
const toolConfig = toolId ? getTool(toolId) : undefined
const schema = getBlockSchema(block, toolConfig)
if (schema && Object.keys(schema).length > 0) {
blockOutputSchemas[id] = schema
}
}
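The precedence in getBlockSchema is: trigger-saved outputs first, then the selected tool's outputs, then the block's own saved outputs. An illustrative set of calls (the block and tool values are hypothetical placeholders):

// Sketch: schema resolution order.
declare const emailTrigger: SerializedBlock // category 'triggers', has saved outputs
declare const gmailBlock: SerializedBlock   // tool block with saved outputs
declare const gmailTool: ToolConfig         // tool with its own outputs

getBlockSchema(emailTrigger, gmailTool) // -> emailTrigger.outputs (trigger payload wins)
getBlockSchema(gmailBlock, gmailTool)   // -> gmailTool.outputs (tool is source of truth)
getBlockSchema(gmailBlock)              // -> gmailBlock.outputs (fallback)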

View File

@@ -0,0 +1,48 @@
/**
* Formats a JavaScript/TypeScript value as a code literal for the target language.
* Handles special cases like null, undefined, booleans, and Python-specific number representations.
*
* @param value - The value to format
* @param language - Target language ('javascript' or 'python')
* @returns A string literal representation valid in the target language
*
* @example
* formatLiteralForCode(null, 'python') // => 'None'
* formatLiteralForCode(true, 'python') // => 'True'
* formatLiteralForCode(NaN, 'python') // => "float('nan')"
* formatLiteralForCode("hello", 'javascript') // => '"hello"'
 * formatLiteralForCode({a: 1}, 'python') // => 'json.loads("{\"a\":1}")'
*/
export function formatLiteralForCode(value: unknown, language: 'javascript' | 'python'): string {
const isPython = language === 'python'
if (value === undefined) {
return isPython ? 'None' : 'undefined'
}
if (value === null) {
return isPython ? 'None' : 'null'
}
if (typeof value === 'boolean') {
return isPython ? (value ? 'True' : 'False') : String(value)
}
if (typeof value === 'number') {
if (Number.isNaN(value)) {
return isPython ? "float('nan')" : 'NaN'
}
if (value === Number.POSITIVE_INFINITY) {
return isPython ? "float('inf')" : 'Infinity'
}
if (value === Number.NEGATIVE_INFINITY) {
return isPython ? "float('-inf')" : '-Infinity'
}
return String(value)
}
if (typeof value === 'string') {
return JSON.stringify(value)
}
// Objects and arrays - Python needs json.loads() because JSON true/false/null aren't valid Python
if (isPython) {
return `json.loads(${JSON.stringify(JSON.stringify(value))})`
}
return JSON.stringify(value)
}
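A few concrete calls, showing the exact strings each branch produces:

formatLiteralForCode(undefined, 'javascript') // 'undefined'
formatLiteralForCode(undefined, 'python')     // 'None'
formatLiteralForCode(-Infinity, 'python')     // "float('-inf')"
formatLiteralForCode('he"llo', 'javascript')  // '"he\"llo"' (JSON-escaped)
formatLiteralForCode([1, null], 'python')     // 'json.loads("[1,null]")'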

View File

@@ -0,0 +1,493 @@
import { describe, expect, it } from 'vitest'
import type { DAG, DAGNode } from '@/executor/dag/builder'
import type { DAGEdge, NodeMetadata } from '@/executor/dag/types'
import { computeDirtySet, validateRunFromBlock } from '@/executor/utils/run-from-block'
import type { SerializedLoop, SerializedParallel } from '@/serializer/types'
/**
* Helper to create a DAG node for testing
*/
function createNode(
id: string,
outgoingEdges: Array<{ target: string; sourceHandle?: string }> = [],
metadata: Partial<NodeMetadata> = {}
): DAGNode {
const edges = new Map<string, DAGEdge>()
for (const edge of outgoingEdges) {
edges.set(edge.target, { target: edge.target, sourceHandle: edge.sourceHandle })
}
return {
id,
block: {
id,
position: { x: 0, y: 0 },
config: { tool: 'test', params: {} },
inputs: {},
outputs: {},
metadata: { id: 'test', name: `block-${id}`, category: 'tools' },
enabled: true,
},
incomingEdges: new Set<string>(),
outgoingEdges: edges,
metadata: {
isParallelBranch: false,
isLoopNode: false,
isSentinel: false,
...metadata,
},
}
}
/**
* Helper to create a DAG for testing
*/
function createDAG(nodes: DAGNode[]): DAG {
const nodeMap = new Map<string, DAGNode>()
for (const node of nodes) {
nodeMap.set(node.id, node)
}
// Set up incoming edges based on outgoing edges
for (const node of nodes) {
for (const [, edge] of node.outgoingEdges) {
const targetNode = nodeMap.get(edge.target)
if (targetNode) {
targetNode.incomingEdges.add(node.id)
}
}
}
return {
nodes: nodeMap,
loopConfigs: new Map<string, SerializedLoop>(),
parallelConfigs: new Map<string, SerializedParallel>(),
}
}
describe('computeDirtySet', () => {
it('includes start block in dirty set', () => {
const dag = createDAG([createNode('A'), createNode('B'), createNode('C')])
const dirtySet = computeDirtySet(dag, 'B')
expect(dirtySet.has('B')).toBe(true)
})
it('includes all downstream blocks in linear workflow', () => {
// A → B → C → D
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
const dirtySet = computeDirtySet(dag, 'B')
expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(3)
})
it('handles branching paths', () => {
// A → B → C
// ↓
// D → E
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }, { target: 'D' }]),
createNode('C'),
createNode('D', [{ target: 'E' }]),
createNode('E'),
])
const dirtySet = computeDirtySet(dag, 'B')
expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.has('E')).toBe(true)
expect(dirtySet.size).toBe(4)
})
it('handles convergence points', () => {
// A → C
// B → C → D
const dag = createDAG([
createNode('A', [{ target: 'C' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
// Run from A: should include A, C, D (but not B)
const dirtySet = computeDirtySet(dag, 'A')
expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(3)
})
it('handles diamond pattern', () => {
// B
// ↗ ↘
// A D
// ↘ ↗
// C
const dag = createDAG([
createNode('A', [{ target: 'B' }, { target: 'C' }]),
createNode('B', [{ target: 'D' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
const dirtySet = computeDirtySet(dag, 'A')
expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(4)
})
it('stops at graph boundaries', () => {
// A → B C → D (disconnected)
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B'),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
const dirtySet = computeDirtySet(dag, 'A')
expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(false)
expect(dirtySet.has('D')).toBe(false)
expect(dirtySet.size).toBe(2)
})
it('handles single node workflow', () => {
const dag = createDAG([createNode('A')])
const dirtySet = computeDirtySet(dag, 'A')
expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.size).toBe(1)
})
it('handles node not in DAG gracefully', () => {
const dag = createDAG([createNode('A'), createNode('B')])
const dirtySet = computeDirtySet(dag, 'nonexistent')
// Should just contain the start block ID even if not found
expect(dirtySet.has('nonexistent')).toBe(true)
expect(dirtySet.size).toBe(1)
})
it('includes convergent block when running from one branch of parallel', () => {
// Parallel branches converging:
// A → B → D
// A → C → D
// Running from B should include B and D (but not A or C)
const dag = createDAG([
createNode('A', [{ target: 'B' }, { target: 'C' }]),
createNode('B', [{ target: 'D' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
const dirtySet = computeDirtySet(dag, 'B')
expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(false)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(2)
})
it('handles running from convergent block itself (all upstream non-dirty)', () => {
// A → C
// B → C
// Running from C should include C and its downstream D (but not A or B)
const dag = createDAG([
createNode('A', [{ target: 'C' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])
const dirtySet = computeDirtySet(dag, 'C')
expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(2)
})
it('handles deep downstream chains', () => {
// A → B → C → D → E → F
// Running from C should include C, D, E, F
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D', [{ target: 'E' }]),
createNode('E', [{ target: 'F' }]),
createNode('F'),
])
const dirtySet = computeDirtySet(dag, 'C')
expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.has('E')).toBe(true)
expect(dirtySet.has('F')).toBe(true)
expect(dirtySet.size).toBe(4)
})
})
describe('validateRunFromBlock', () => {
it('accepts valid block', () => {
const dag = createDAG([createNode('A'), createNode('B')])
const executedBlocks = new Set(['A', 'B'])
const result = validateRunFromBlock('A', dag, executedBlocks)
expect(result.valid).toBe(true)
expect(result.error).toBeUndefined()
})
it('rejects block not found in DAG', () => {
const dag = createDAG([createNode('A')])
const executedBlocks = new Set(['A', 'B'])
const result = validateRunFromBlock('B', dag, executedBlocks)
expect(result.valid).toBe(false)
expect(result.error).toContain('Block not found')
})
it('rejects blocks inside loops', () => {
const dag = createDAG([createNode('A', [], { isLoopNode: true, loopId: 'loop-1' })])
const executedBlocks = new Set(['A'])
const result = validateRunFromBlock('A', dag, executedBlocks)
expect(result.valid).toBe(false)
expect(result.error).toContain('inside loop')
expect(result.error).toContain('loop-1')
})
it('rejects blocks inside parallels', () => {
const dag = createDAG([
createNode('A', [], { isParallelBranch: true, parallelId: 'parallel-1' }),
])
const executedBlocks = new Set(['A'])
const result = validateRunFromBlock('A', dag, executedBlocks)
expect(result.valid).toBe(false)
expect(result.error).toContain('inside parallel')
expect(result.error).toContain('parallel-1')
})
it('rejects sentinel nodes', () => {
const dag = createDAG([createNode('A', [], { isSentinel: true, sentinelType: 'start' })])
const executedBlocks = new Set(['A'])
const result = validateRunFromBlock('A', dag, executedBlocks)
expect(result.valid).toBe(false)
expect(result.error).toContain('sentinel')
})
it('rejects blocks with unexecuted upstream dependencies', () => {
// A → B, only A executed but B depends on A
const dag = createDAG([createNode('A', [{ target: 'B' }]), createNode('B')])
const executedBlocks = new Set<string>() // A was not executed
const result = validateRunFromBlock('B', dag, executedBlocks)
expect(result.valid).toBe(false)
expect(result.error).toContain('Upstream dependency not executed')
})
it('allows blocks with no dependencies even if not previously executed', () => {
// A and B are independent (no edges)
const dag = createDAG([createNode('A'), createNode('B')])
const executedBlocks = new Set(['A']) // B was not executed but has no deps
const result = validateRunFromBlock('B', dag, executedBlocks)
expect(result.valid).toBe(true) // B has no incoming edges, so it's valid
})
it('accepts regular executed block', () => {
const dag = createDAG([
createNode('trigger', [{ target: 'A' }]),
createNode('A', [{ target: 'B' }]),
createNode('B'),
])
const executedBlocks = new Set(['trigger', 'A', 'B'])
const result = validateRunFromBlock('A', dag, executedBlocks)
expect(result.valid).toBe(true)
})
it('accepts loop container when executed', () => {
// Loop container with sentinel nodes
const loopId = 'loop-container-1'
const sentinelStartId = `loop-${loopId}-sentinel-start`
const sentinelEndId = `loop-${loopId}-sentinel-end`
const dag = createDAG([
createNode('A', [{ target: sentinelStartId }]),
createNode(sentinelStartId, [{ target: 'B' }], {
isSentinel: true,
sentinelType: 'start',
loopId,
}),
createNode('B', [{ target: sentinelEndId }], { isLoopNode: true, loopId }),
createNode(sentinelEndId, [{ target: 'C' }], {
isSentinel: true,
sentinelType: 'end',
loopId,
}),
createNode('C'),
])
dag.loopConfigs.set(loopId, { id: loopId, nodes: ['B'], iterations: 3, loopType: 'for' } as any)
const executedBlocks = new Set(['A', loopId, sentinelStartId, 'B', sentinelEndId, 'C'])
const result = validateRunFromBlock(loopId, dag, executedBlocks)
expect(result.valid).toBe(true)
})
it('accepts parallel container when executed', () => {
// Parallel container with sentinel nodes
const parallelId = 'parallel-container-1'
const sentinelStartId = `parallel-${parallelId}-sentinel-start`
const sentinelEndId = `parallel-${parallelId}-sentinel-end`
const dag = createDAG([
createNode('A', [{ target: sentinelStartId }]),
createNode(sentinelStartId, [{ target: 'B₍0₎' }], {
isSentinel: true,
sentinelType: 'start',
parallelId,
}),
createNode('B₍0₎', [{ target: sentinelEndId }], { isParallelBranch: true, parallelId }),
createNode(sentinelEndId, [{ target: 'C' }], {
isSentinel: true,
sentinelType: 'end',
parallelId,
}),
createNode('C'),
])
dag.parallelConfigs.set(parallelId, { id: parallelId, nodes: ['B'], count: 2 } as any)
const executedBlocks = new Set(['A', parallelId, sentinelStartId, 'B₍0₎', sentinelEndId, 'C'])
const result = validateRunFromBlock(parallelId, dag, executedBlocks)
expect(result.valid).toBe(true)
})
it('allows loop container with no upstream dependencies', () => {
// Loop containers are validated via their sentinel nodes, not incoming edges on the container itself
// If the loop has no upstream dependencies, it should be valid
const loopId = 'loop-container-1'
const sentinelStartId = `loop-${loopId}-sentinel-start`
const dag = createDAG([
createNode(sentinelStartId, [], { isSentinel: true, sentinelType: 'start', loopId }),
])
dag.loopConfigs.set(loopId, { id: loopId, nodes: [], iterations: 3, loopType: 'for' } as any)
const executedBlocks = new Set<string>() // Nothing executed but loop has no deps
const result = validateRunFromBlock(loopId, dag, executedBlocks)
// Loop container validation doesn't check incoming edges (containers don't have nodes in dag.nodes)
// So this is valid - the loop can start fresh
expect(result.valid).toBe(true)
})
})
describe('computeDirtySet with containers', () => {
it('includes loop container and all downstream when running from loop', () => {
// A → loop-sentinel-start → B (inside loop) → loop-sentinel-end → C
const loopId = 'loop-1'
const sentinelStartId = `loop-${loopId}-sentinel-start`
const sentinelEndId = `loop-${loopId}-sentinel-end`
const dag = createDAG([
createNode('A', [{ target: sentinelStartId }]),
createNode(sentinelStartId, [{ target: 'B' }], {
isSentinel: true,
sentinelType: 'start',
loopId,
}),
createNode('B', [{ target: sentinelEndId }], { isLoopNode: true, loopId }),
createNode(sentinelEndId, [{ target: 'C' }], {
isSentinel: true,
sentinelType: 'end',
loopId,
}),
createNode('C'),
])
dag.loopConfigs.set(loopId, { id: loopId, nodes: ['B'], iterations: 3, loopType: 'for' } as any)
const dirtySet = computeDirtySet(dag, loopId)
// Should include loop container, sentinel-start, B, sentinel-end, C
expect(dirtySet.has(loopId)).toBe(true)
expect(dirtySet.has(sentinelStartId)).toBe(true)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has(sentinelEndId)).toBe(true)
expect(dirtySet.has('C')).toBe(true)
// Should NOT include A (upstream)
expect(dirtySet.has('A')).toBe(false)
})
it('includes parallel container and all downstream when running from parallel', () => {
// A → parallel-sentinel-start → B₍0₎ → parallel-sentinel-end → C
const parallelId = 'parallel-1'
const sentinelStartId = `parallel-${parallelId}-sentinel-start`
const sentinelEndId = `parallel-${parallelId}-sentinel-end`
const dag = createDAG([
createNode('A', [{ target: sentinelStartId }]),
createNode(sentinelStartId, [{ target: 'B₍0₎' }], {
isSentinel: true,
sentinelType: 'start',
parallelId,
}),
createNode('B₍0₎', [{ target: sentinelEndId }], { isParallelBranch: true, parallelId }),
createNode(sentinelEndId, [{ target: 'C' }], {
isSentinel: true,
sentinelType: 'end',
parallelId,
}),
createNode('C'),
])
dag.parallelConfigs.set(parallelId, { id: parallelId, nodes: ['B'], count: 2 } as any)
const dirtySet = computeDirtySet(dag, parallelId)
// Should include parallel container, sentinel-start, B₍0₎, sentinel-end, C
expect(dirtySet.has(parallelId)).toBe(true)
expect(dirtySet.has(sentinelStartId)).toBe(true)
expect(dirtySet.has('B₍0₎')).toBe(true)
expect(dirtySet.has(sentinelEndId)).toBe(true)
expect(dirtySet.has('C')).toBe(true)
// Should NOT include A (upstream)
expect(dirtySet.has('A')).toBe(false)
})
})

View File

@@ -0,0 +1,169 @@
import { createLogger } from '@sim/logger'
import { LOOP, PARALLEL } from '@/executor/constants'
import type { DAG } from '@/executor/dag/builder'
const logger = createLogger('run-from-block')
/**
* Builds the sentinel-start node ID for a loop.
*/
function buildLoopSentinelStartId(loopId: string): string {
return `${LOOP.SENTINEL.PREFIX}${loopId}${LOOP.SENTINEL.START_SUFFIX}`
}
/**
* Builds the sentinel-start node ID for a parallel.
*/
function buildParallelSentinelStartId(parallelId: string): string {
return `${PARALLEL.SENTINEL.PREFIX}${parallelId}${PARALLEL.SENTINEL.START_SUFFIX}`
}
/**
* Checks if a block ID is a loop or parallel container and returns the sentinel-start ID if so.
* Returns null if the block is not a container.
*/
export function resolveContainerToSentinelStart(blockId: string, dag: DAG): string | null {
if (dag.loopConfigs.has(blockId)) {
return buildLoopSentinelStartId(blockId)
}
if (dag.parallelConfigs.has(blockId)) {
return buildParallelSentinelStartId(blockId)
}
return null
}
/**
* Result of validating a block for run-from-block execution.
*/
export interface RunFromBlockValidation {
valid: boolean
error?: string
}
/**
* Context for run-from-block execution mode.
*/
export interface RunFromBlockContext {
/** The block ID to start execution from */
startBlockId: string
/** Set of block IDs that need re-execution (start block + all downstream) */
dirtySet: Set<string>
}
/**
* Computes all blocks that need re-execution when running from a specific block.
* Uses BFS to find all downstream blocks reachable via outgoing edges.
*
* For loop/parallel containers, starts from the sentinel-start node and includes
* the container ID itself in the dirty set.
*
* @param dag - The workflow DAG
* @param startBlockId - The block to start execution from
* @returns Set of block IDs that are "dirty" and need re-execution
*/
export function computeDirtySet(dag: DAG, startBlockId: string): Set<string> {
const dirty = new Set<string>([startBlockId])
const sentinelStartId = resolveContainerToSentinelStart(startBlockId, dag)
const traversalStartId = sentinelStartId ?? startBlockId
if (sentinelStartId) {
dirty.add(sentinelStartId)
}
const queue = [traversalStartId]
while (queue.length > 0) {
const nodeId = queue.shift()!
const node = dag.nodes.get(nodeId)
if (!node) continue
for (const [, edge] of node.outgoingEdges) {
if (!dirty.has(edge.target)) {
dirty.add(edge.target)
queue.push(edge.target)
}
}
}
logger.debug('Computed dirty set', {
startBlockId,
traversalStartId,
dirtySetSize: dirty.size,
dirtyBlocks: Array.from(dirty),
})
return dirty
}
/**
* Validates that a block can be used as a run-from-block starting point.
*
* Validation rules:
* - Block must exist in the DAG (or be a loop/parallel container)
* - Block cannot be inside a loop (but loop containers are allowed)
* - Block cannot be inside a parallel (but parallel containers are allowed)
* - Block cannot be a sentinel node
* - All upstream dependencies must have been executed (have cached outputs)
*
* @param blockId - The block ID to validate
* @param dag - The workflow DAG
* @param executedBlocks - Set of blocks that were executed in the source run
* @returns Validation result with error message if invalid
*/
export function validateRunFromBlock(
blockId: string,
dag: DAG,
executedBlocks: Set<string>
): RunFromBlockValidation {
const node = dag.nodes.get(blockId)
const isLoopContainer = dag.loopConfigs.has(blockId)
const isParallelContainer = dag.parallelConfigs.has(blockId)
const isContainer = isLoopContainer || isParallelContainer
if (!node && !isContainer) {
return { valid: false, error: `Block not found in workflow: ${blockId}` }
}
if (isContainer) {
const sentinelStartId = resolveContainerToSentinelStart(blockId, dag)
if (!sentinelStartId || !dag.nodes.has(sentinelStartId)) {
return {
valid: false,
error: `Container sentinel not found for: ${blockId}`,
}
}
}
if (node) {
if (node.metadata.isLoopNode) {
return {
valid: false,
error: `Cannot run from block inside loop: ${node.metadata.loopId}`,
}
}
if (node.metadata.isParallelBranch) {
return {
valid: false,
error: `Cannot run from block inside parallel: ${node.metadata.parallelId}`,
}
}
if (node.metadata.isSentinel) {
return { valid: false, error: 'Cannot run from sentinel node' }
}
if (node.incomingEdges.size > 0) {
for (const sourceId of node.incomingEdges.keys()) {
if (!executedBlocks.has(sourceId)) {
return {
valid: false,
error: `Upstream dependency not executed: ${sourceId}`,
}
}
}
}
}
return { valid: true }
}
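A minimal usage sketch (hypothetical wiring; `dag` and `executedBlocks` are assumed to come from the prior run's snapshot): validate the start block first, then compute the dirty set so cached outputs can be reused for everything else.

const validation = validateRunFromBlock(startBlockId, dag, executedBlocks)
if (!validation.valid) {
  throw new Error(validation.error)
}
// Blocks outside the dirty set keep their cached outputs from the source run.
const dirtySet = computeDirtySet(dag, startBlockId)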

View File

@@ -378,8 +378,30 @@ function buildManualTriggerOutput(
return mergeFilesIntoOutput(output, workflowInput)
}
function buildIntegrationTriggerOutput(workflowInput: unknown): NormalizedBlockOutput {
return isPlainObject(workflowInput) ? (workflowInput as NormalizedBlockOutput) : {}
function buildIntegrationTriggerOutput(
workflowInput: unknown,
structuredInput: Record<string, unknown>,
hasStructured: boolean
): NormalizedBlockOutput {
const output: NormalizedBlockOutput = {}
if (hasStructured) {
for (const [key, value] of Object.entries(structuredInput)) {
output[key] = value
}
}
if (isPlainObject(workflowInput)) {
for (const [key, value] of Object.entries(workflowInput)) {
if (value !== undefined && value !== null) {
output[key] = value
} else if (!Object.hasOwn(output, key)) {
output[key] = value
}
}
}
return mergeFilesIntoOutput(output, workflowInput)
}
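In effect, structured trigger input seeds the output, and the raw workflow input wins for any key with a non-null value; null/undefined workflow values only land on keys the structured input did not set. A small illustration with hypothetical inputs:

// structuredInput = { channel: 'slack', retries: 3 }
// workflowInput   = { channel: 'email', retries: null }
// => output.channel === 'email' (non-null override)
// => output.retries === 3      (null does not clobber the structured value)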
function extractSubBlocks(block: SerializedBlock): Record<string, unknown> | undefined {
@@ -428,7 +450,7 @@ export function buildStartBlockOutput(options: StartBlockOutputOptions): Normali
return buildManualTriggerOutput(finalInput, workflowInput)
case StartBlockPath.EXTERNAL_TRIGGER:
return buildIntegrationTriggerOutput(workflowInput)
return buildIntegrationTriggerOutput(workflowInput, structuredInput, hasStructured)
case StartBlockPath.LEGACY_STARTER:
return buildLegacyStarterOutput(

View File

@@ -157,7 +157,14 @@ export class VariableResolver {
let replacementError: Error | null = null
// Use generic utility for smart variable reference replacement
const blockType = block?.metadata?.id
const language =
blockType === BlockType.FUNCTION
? ((block?.config?.params as Record<string, unknown> | undefined)?.language as
| string
| undefined)
: undefined
let result = replaceValidReferences(template, (match) => {
if (replacementError) return match
@@ -167,14 +174,7 @@ export class VariableResolver {
return match
}
const blockType = block?.metadata?.id
const isInTemplateLiteral =
blockType === BlockType.FUNCTION &&
template.includes('${') &&
template.includes('}') &&
template.includes('`')
return this.blockResolver.formatValueForBlock(resolved, blockType, isInTemplateLiteral)
return this.blockResolver.formatValueForBlock(resolved, blockType, language)
} catch (error) {
replacementError = error instanceof Error ? error : new Error(String(error))
return match

View File

@@ -257,15 +257,9 @@ describe('BlockResolver', () => {
expect(result).toBe('"hello"')
})
it.concurrent('should format string for function block in template literal', () => {
it.concurrent('should format object for function block', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock('hello', 'function', true)
expect(result).toBe('hello')
})
it.concurrent('should format object for function block in template literal', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock({ a: 1 }, 'function', true)
const result = resolver.formatValueForBlock({ a: 1 }, 'function')
expect(result).toBe('{"a":1}')
})

View File

@@ -1,15 +1,16 @@
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import {
isReference,
normalizeName,
parseReferencePath,
SPECIAL_REFERENCE_PREFIXES,
} from '@/executor/constants'
import { getBlockSchema } from '@/executor/utils/block-data'
import {
InvalidFieldError,
type OutputSchema,
resolveBlockReference,
} from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
import {
navigatePath,
type ResolutionContext,
@@ -67,15 +68,9 @@ export class BlockResolver implements Resolver {
blockData[blockId] = output
}
const blockType = block.metadata?.id
const params = block.config?.params as Record<string, unknown> | undefined
const subBlocks = params
? Object.fromEntries(Object.entries(params).map(([k, v]) => [k, { value: v }]))
: undefined
const toolId = block.config?.tool
const toolConfig = toolId ? getTool(toolId) : undefined
const outputSchema =
toolConfig?.outputs ?? (blockType ? getBlockOutputs(blockType, subBlocks) : block.outputs)
const outputSchema = getBlockSchema(block, toolConfig)
if (outputSchema && Object.keys(outputSchema).length > 0) {
blockOutputSchemas[blockId] = outputSchema
@@ -165,17 +160,13 @@ export class BlockResolver implements Resolver {
return this.nameToBlockId.get(normalizeName(name))
}
public formatValueForBlock(
value: any,
blockType: string | undefined,
isInTemplateLiteral = false
): string {
public formatValueForBlock(value: any, blockType: string | undefined, language?: string): string {
if (blockType === 'condition') {
return this.stringifyForCondition(value)
}
if (blockType === 'function') {
return this.formatValueForCodeContext(value, isInTemplateLiteral)
return this.formatValueForCodeContext(value, language)
}
if (blockType === 'response') {
@@ -216,29 +207,7 @@ export class BlockResolver implements Resolver {
return String(value)
}
private formatValueForCodeContext(value: any, isInTemplateLiteral: boolean): string {
if (isInTemplateLiteral) {
if (typeof value === 'string') {
return value
}
if (typeof value === 'object' && value !== null) {
return JSON.stringify(value)
}
return String(value)
}
if (typeof value === 'string') {
return JSON.stringify(value)
}
if (typeof value === 'object' && value !== null) {
return JSON.stringify(value)
}
if (value === undefined) {
return 'undefined'
}
if (value === null) {
return 'null'
}
return String(value)
private formatValueForCodeContext(value: any, language?: string): string {
return formatLiteralForCode(value, language === 'python' ? 'python' : 'javascript')
}
}
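A hedged sketch of the resulting behavior (assuming `formatLiteralForCode` emits language-native literals; exact outputs depend on that utility):

// formatValueForBlock('hi', 'function')           // '"hi"' (JS string literal, per the test above)
// formatValueForBlock({ a: 1 }, 'function')       // '{"a":1}' (per the updated test)
// formatValueForBlock(null, 'function', 'python') // plausibly 'None' in Python mode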

View File

@@ -30,7 +30,10 @@ export function navigatePath(obj: any, path: string[]): any {
const arrayMatch = part.match(/^([^[]+)(\[.+)$/)
if (arrayMatch) {
const [, prop, bracketsPart] = arrayMatch
current = current[prop]
current =
typeof current === 'object' && current !== null
? (current as Record<string, unknown>)[prop]
: undefined
if (current === undefined || current === null) {
return undefined
}
@@ -49,7 +52,10 @@ export function navigatePath(obj: any, path: string[]): any {
const index = Number.parseInt(part, 10)
current = Array.isArray(current) ? current[index] : undefined
} else {
current = current[part]
current =
typeof current === 'object' && current !== null
? (current as Record<string, unknown>)[part]
: undefined
}
}
return current
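The added guards mean a primitive or null encountered mid-path now short-circuits to undefined instead of throwing. Illustrative behavior (hypothetical data, using the numeric-index branch above):

// navigatePath({ a: { b: [10, 20] } }, ['a', 'b', '1']) // 20
// navigatePath({ a: null }, ['a', 'b'])                 // undefined (previously threw a TypeError)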

View File

@@ -1,5 +1,6 @@
import { useCallback } from 'react'
import type { Node, ReactFlowInstance } from 'reactflow'
import { BLOCK_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
interface VisibleBounds {
width: number
@@ -139,8 +140,8 @@ export function useCanvasViewport(reactFlowInstance: ReactFlowInstance | null) {
let maxY = Number.NEGATIVE_INFINITY
nodes.forEach((node) => {
const nodeWidth = node.width ?? 200
const nodeHeight = node.height ?? 100
const nodeWidth = node.width ?? BLOCK_DIMENSIONS.FIXED_WIDTH
const nodeHeight = node.height ?? BLOCK_DIMENSIONS.MIN_HEIGHT
minX = Math.min(minX, node.position.x)
minY = Math.min(minY, node.position.y)

View File

@@ -680,6 +680,10 @@ export function useCollaborativeWorkflow() {
previousPositions?: Map<string, { x: number; y: number; parentId?: string }>
}
) => {
if (isBaselineDiffView) {
return
}
if (!isInActiveRoom()) {
logger.debug('Skipping batch position update - not in active workflow')
return
@@ -725,7 +729,7 @@ export function useCollaborativeWorkflow() {
}
}
},
[addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
)
const collaborativeUpdateBlockName = useCallback(
@@ -817,6 +821,10 @@ export function useCollaborativeWorkflow() {
const collaborativeBatchToggleBlockEnabled = useCallback(
(ids: string[]) => {
if (isBaselineDiffView) {
return
}
if (ids.length === 0) return
const previousStates: Record<string, boolean> = {}
@@ -849,7 +857,7 @@ export function useCollaborativeWorkflow() {
undoRedo.recordBatchToggleEnabled(validIds, previousStates)
},
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
)
const collaborativeBatchUpdateParent = useCallback(
@@ -861,6 +869,10 @@ export function useCollaborativeWorkflow() {
affectedEdges: Edge[]
}>
) => {
if (isBaselineDiffView) {
return
}
if (!isInActiveRoom()) {
logger.debug('Skipping batch update parent - not in active workflow')
return
@@ -931,7 +943,7 @@ export function useCollaborativeWorkflow() {
logger.debug('Batch updated parent for blocks', { updateCount: updates.length })
},
[isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
[isBaselineDiffView, isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
)
const collaborativeToggleBlockAdvancedMode = useCallback(
@@ -951,18 +963,37 @@ export function useCollaborativeWorkflow() {
const collaborativeSetBlockCanonicalMode = useCallback(
(id: string, canonicalId: string, canonicalMode: 'basic' | 'advanced') => {
executeQueuedOperation(
BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
OPERATION_TARGETS.BLOCK,
{ id, canonicalId, canonicalMode },
() => useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
)
if (isBaselineDiffView) {
return
}
useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
if (!activeWorkflowId) {
return
}
const operationId = crypto.randomUUID()
addToQueue({
id: operationId,
operation: {
operation: BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
target: OPERATION_TARGETS.BLOCK,
payload: { id, canonicalId, canonicalMode },
},
workflowId: activeWorkflowId,
userId: session?.user?.id || 'unknown',
})
},
[executeQueuedOperation]
[isBaselineDiffView, activeWorkflowId, addToQueue, session?.user?.id]
)
const collaborativeBatchToggleBlockHandles = useCallback(
(ids: string[]) => {
if (isBaselineDiffView) {
return
}
if (ids.length === 0) return
const previousStates: Record<string, boolean> = {}
@@ -995,11 +1026,15 @@ export function useCollaborativeWorkflow() {
undoRedo.recordBatchToggleHandles(validIds, previousStates)
},
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
)
const collaborativeBatchAddEdges = useCallback(
(edges: Edge[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}
if (!isInActiveRoom()) {
logger.debug('Skipping batch add edges - not in active workflow')
return false
@@ -1035,11 +1070,15 @@ export function useCollaborativeWorkflow() {
return true
},
[addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
)
const collaborativeBatchRemoveEdges = useCallback(
(edgeIds: string[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}
if (!isInActiveRoom()) {
logger.debug('Skipping batch remove edges - not in active workflow')
return false
@@ -1089,7 +1128,7 @@ export function useCollaborativeWorkflow() {
logger.info('Batch removed edges', { count: validEdgeIds.length })
return true
},
[isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
[isBaselineDiffView, isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
)
const collaborativeSetSubblockValue = useCallback(
@@ -1165,6 +1204,10 @@ export function useCollaborativeWorkflow() {
(blockId: string, subblockId: string, value: any) => {
if (isApplyingRemoteChange.current) return
if (isBaselineDiffView) {
return
}
if (!isInActiveRoom()) {
logger.debug('Skipping tag selection - not in active workflow', {
currentWorkflowId,
@@ -1192,7 +1235,14 @@ export function useCollaborativeWorkflow() {
userId: session?.user?.id || 'unknown',
})
},
[addToQueue, currentWorkflowId, activeWorkflowId, session?.user?.id, isInActiveRoom]
[
isBaselineDiffView,
addToQueue,
currentWorkflowId,
activeWorkflowId,
session?.user?.id,
isInActiveRoom,
]
)
const collaborativeUpdateLoopType = useCallback(
@@ -1538,6 +1588,10 @@ export function useCollaborativeWorkflow() {
const collaborativeBatchRemoveBlocks = useCallback(
(blockIds: string[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}
if (!isInActiveRoom()) {
logger.debug('Skipping batch remove blocks - not in active workflow')
return false
@@ -1619,6 +1673,7 @@ export function useCollaborativeWorkflow() {
return true
},
[
isBaselineDiffView,
addToQueue,
activeWorkflowId,
session?.user?.id,

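Every collaborative mutation above now short-circuits in baseline-diff view; the recurring pattern, sketched (assuming `isBaselineDiffView` comes from the diff view state):

// const collaborativeEdit = useCallback((args) => {
//   if (isBaselineDiffView) return // baseline diff is read-only: drop the edit
//   // ...queue the operation as before
// }, [isBaselineDiffView, /* existing deps */])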
View File

@@ -1,10 +1,85 @@
import { useCallback, useRef } from 'react'
import { createLogger } from '@sim/logger'
import type { ExecutionEvent } from '@/lib/workflows/executor/execution-events'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { SubflowType } from '@/stores/workflows/workflow/types'
const logger = createLogger('useExecutionStream')
/**
* Processes SSE events from a response body and invokes appropriate callbacks.
*/
async function processSSEStream(
reader: ReadableStreamDefaultReader<Uint8Array>,
callbacks: ExecutionStreamCallbacks,
logPrefix: string
): Promise<void> {
const decoder = new TextDecoder()
let buffer = ''
try {
while (true) {
const { done, value } = await reader.read()
if (done) break
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n\n')
buffer = lines.pop() || ''
for (const line of lines) {
if (!line.trim() || !line.startsWith('data: ')) continue
const data = line.substring(6).trim()
if (data === '[DONE]') {
logger.info(`${logPrefix} stream completed`)
continue
}
try {
const event = JSON.parse(data) as ExecutionEvent
switch (event.type) {
case 'execution:started':
callbacks.onExecutionStarted?.(event.data)
break
case 'execution:completed':
callbacks.onExecutionCompleted?.(event.data)
break
case 'execution:error':
callbacks.onExecutionError?.(event.data)
break
case 'execution:cancelled':
callbacks.onExecutionCancelled?.(event.data)
break
case 'block:started':
callbacks.onBlockStarted?.(event.data)
break
case 'block:completed':
callbacks.onBlockCompleted?.(event.data)
break
case 'block:error':
callbacks.onBlockError?.(event.data)
break
case 'stream:chunk':
callbacks.onStreamChunk?.(event.data)
break
case 'stream:done':
callbacks.onStreamDone?.(event.data)
break
default:
logger.warn('Unknown event type:', (event as any).type)
}
} catch (error) {
logger.error('Failed to parse SSE event:', error, { data })
}
}
}
} finally {
reader.releaseLock()
}
}
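A minimal caller sketch (hypothetical endpoint and callback; assumes the response carries an SSE body):

const response = await fetch(`/api/workflows/${workflowId}/execute`, { method: 'POST' })
const reader = response.body!.getReader()
await processSSEStream(reader, { onBlockCompleted: (data) => logger.info('block done', data) }, 'Execution')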
export interface ExecutionStreamCallbacks {
onExecutionStarted?: (data: { startTime: string }) => void
onExecutionCompleted?: (data: {
@@ -68,6 +143,14 @@ export interface ExecuteStreamOptions {
loops?: Record<string, any>
parallels?: Record<string, any>
}
stopAfterBlockId?: string
callbacks?: ExecutionStreamCallbacks
}
export interface ExecuteFromBlockOptions {
workflowId: string
startBlockId: string
sourceSnapshot: SerializableExecutionState
callbacks?: ExecutionStreamCallbacks
}
@@ -119,91 +202,7 @@ export function useExecutionStream() {
}
const reader = response.body.getReader()
const decoder = new TextDecoder()
let buffer = ''
try {
while (true) {
const { done, value } = await reader.read()
if (done) {
break
}
buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n\n')
buffer = lines.pop() || ''
for (const line of lines) {
if (!line.trim() || !line.startsWith('data: ')) {
continue
}
const data = line.substring(6).trim()
if (data === '[DONE]') {
logger.info('Stream completed')
continue
}
try {
const event = JSON.parse(data) as ExecutionEvent
logger.info('📡 SSE Event received:', {
type: event.type,
executionId: event.executionId,
data: event.data,
})
switch (event.type) {
case 'execution:started':
logger.info('🚀 Execution started')
callbacks.onExecutionStarted?.(event.data)
break
case 'execution:completed':
logger.info('✅ Execution completed')
callbacks.onExecutionCompleted?.(event.data)
break
case 'execution:error':
logger.error('❌ Execution error')
callbacks.onExecutionError?.(event.data)
break
case 'execution:cancelled':
logger.warn('🛑 Execution cancelled')
callbacks.onExecutionCancelled?.(event.data)
break
case 'block:started':
logger.info('🔷 Block started:', event.data.blockId)
callbacks.onBlockStarted?.(event.data)
break
case 'block:completed':
logger.info('✓ Block completed:', event.data.blockId)
callbacks.onBlockCompleted?.(event.data)
break
case 'block:error':
logger.error('✗ Block error:', event.data.blockId)
callbacks.onBlockError?.(event.data)
break
case 'stream:chunk':
callbacks.onStreamChunk?.(event.data)
break
case 'stream:done':
logger.info('Stream done:', event.data.blockId)
callbacks.onStreamDone?.(event.data)
break
default:
logger.warn('Unknown event type:', (event as any).type)
}
} catch (error) {
logger.error('Failed to parse SSE event:', error, { data })
}
}
}
} finally {
reader.releaseLock()
}
await processSSEStream(reader, callbacks, 'Execution')
} catch (error: any) {
if (error.name === 'AbortError') {
logger.info('Execution stream cancelled')
@@ -222,6 +221,65 @@ export function useExecutionStream() {
}
}, [])
const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => {
const { workflowId, startBlockId, sourceSnapshot, callbacks = {} } = options
if (abortControllerRef.current) {
abortControllerRef.current.abort()
}
const abortController = new AbortController()
abortControllerRef.current = abortController
currentExecutionRef.current = null
try {
const response = await fetch(`/api/workflows/${workflowId}/execute-from-block`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ startBlockId, sourceSnapshot }),
signal: abortController.signal,
})
if (!response.ok) {
const errorResponse = await response.json()
const error = new Error(errorResponse.error || 'Failed to start execution')
if (errorResponse && typeof errorResponse === 'object') {
Object.assign(error, { executionResult: errorResponse })
}
throw error
}
if (!response.body) {
throw new Error('No response body')
}
const executionId = response.headers.get('X-Execution-Id')
if (executionId) {
currentExecutionRef.current = { workflowId, executionId }
}
const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Run-from-block')
} catch (error: any) {
if (error.name === 'AbortError') {
logger.info('Run-from-block execution cancelled')
callbacks.onExecutionCancelled?.({ duration: 0 })
} else {
logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
}
throw error
} finally {
abortControllerRef.current = null
currentExecutionRef.current = null
}
}, [])
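Putting the pieces together, a hedged component-level sketch (names per the interfaces above; `getLastExecutionSnapshot` is the store getter added below):

const { executeFromBlock } = useExecutionStream()
const snapshot = useExecutionStore.getState().getLastExecutionSnapshot(workflowId)
if (snapshot) {
  await executeFromBlock({
    workflowId,
    startBlockId,
    sourceSnapshot: snapshot,
    callbacks: { onBlockCompleted: (data) => logger.info('block done', data) },
  })
}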
const cancel = useCallback(() => {
const execution = currentExecutionRef.current
if (execution) {
@@ -239,6 +297,7 @@ export function useExecutionStream() {
return {
execute,
executeFromBlock,
cancel,
}
}

View File

@@ -132,6 +132,8 @@ async function executeCode(request) {
for (const [key, value] of Object.entries(contextVariables)) {
if (value === undefined) {
await jail.set(key, undefined)
} else if (value === null) {
await jail.set(key, null)
} else {
await jail.set(key, new ivm.ExternalCopy(value).copyInto())
}

View File

@@ -8,6 +8,17 @@ const logger = createLogger('EmbeddingUtils')
const MAX_TOKENS_PER_REQUEST = 8000
const MAX_CONCURRENT_BATCHES = env.KB_CONFIG_CONCURRENCY_LIMIT || 50
const EMBEDDING_DIMENSIONS = 1536
/**
* Check if the model supports custom dimensions.
* text-embedding-3-* models support the dimensions parameter.
* Checks for 'embedding-3' to handle Azure deployments with custom naming conventions.
*/
function supportsCustomDimensions(modelName: string): boolean {
const name = modelName.toLowerCase()
return name.includes('embedding-3') && !name.includes('ada')
}
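Spot checks implied by the predicate:

// supportsCustomDimensions('text-embedding-3-small')     // true
// supportsCustomDimensions('my-azure-embedding-3-large') // true (Azure custom naming)
// supportsCustomDimensions('text-embedding-ada-002')     // false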
export class EmbeddingAPIError extends Error {
public status: number
@@ -93,15 +104,19 @@ async function getEmbeddingConfig(
async function callEmbeddingAPI(inputs: string[], config: EmbeddingConfig): Promise<number[][]> {
return retryWithExponentialBackoff(
async () => {
const useDimensions = supportsCustomDimensions(config.modelName)
const requestBody = config.useAzure
? {
input: inputs,
encoding_format: 'float',
...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
}
: {
input: inputs,
model: config.modelName,
encoding_format: 'float',
...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
}
const response = await fetch(config.apiUrl, {

View File

@@ -18,6 +18,52 @@ const logger = createLogger('BlobClient')
let _blobServiceClient: BlobServiceClientInstance | null = null
interface ParsedCredentials {
accountName: string
accountKey: string
}
/**
* Extract account name and key from an Azure connection string.
* Connection strings have the format: DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=...
*/
function parseConnectionString(connectionString: string): ParsedCredentials {
const accountNameMatch = connectionString.match(/AccountName=([^;]+)/)
if (!accountNameMatch) {
throw new Error('Cannot extract account name from connection string')
}
const accountKeyMatch = connectionString.match(/AccountKey=([^;]+)/)
if (!accountKeyMatch) {
throw new Error('Cannot extract account key from connection string')
}
return {
accountName: accountNameMatch[1],
accountKey: accountKeyMatch[1],
}
}
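For example (dummy credentials):

// parseConnectionString('DefaultEndpointsProtocol=https;AccountName=acct;AccountKey=abc123==;EndpointSuffix=core.windows.net')
// => { accountName: 'acct', accountKey: 'abc123==' }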
/**
* Get account credentials from BLOB_CONFIG, extracting from connection string if necessary.
*/
function getAccountCredentials(): ParsedCredentials {
if (BLOB_CONFIG.connectionString) {
return parseConnectionString(BLOB_CONFIG.connectionString)
}
if (BLOB_CONFIG.accountName && BLOB_CONFIG.accountKey) {
return {
accountName: BLOB_CONFIG.accountName,
accountKey: BLOB_CONFIG.accountKey,
}
}
  throw new Error(
    'Azure Blob Storage credentials are missing: set AZURE_CONNECTION_STRING or both AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY'
  )
}
export async function getBlobServiceClient(): Promise<BlobServiceClientInstance> {
if (_blobServiceClient) return _blobServiceClient
@@ -127,6 +173,8 @@ export async function getPresignedUrl(key: string, expiresIn = 3600) {
const containerClient = blobServiceClient.getContainerClient(BLOB_CONFIG.containerName)
const blockBlobClient = containerClient.getBlockBlobClient(key)
const { accountName, accountKey } = getAccountCredentials()
const sasOptions = {
containerName: BLOB_CONFIG.containerName,
blobName: key,
@@ -137,13 +185,7 @@ export async function getPresignedUrl(key: string, expiresIn = 3600) {
const sasToken = generateBlobSASQueryParameters(
sasOptions,
new StorageSharedKeyCredential(
BLOB_CONFIG.accountName,
BLOB_CONFIG.accountKey ??
(() => {
throw new Error('AZURE_ACCOUNT_KEY is required when using account name authentication')
})()
)
new StorageSharedKeyCredential(accountName, accountKey)
).toString()
return `${blockBlobClient.url}?${sasToken}`
@@ -168,9 +210,14 @@ export async function getPresignedUrlWithConfig(
StorageSharedKeyCredential,
} = await import('@azure/storage-blob')
let tempBlobServiceClient: BlobServiceClientInstance
let accountName: string
let accountKey: string
if (customConfig.connectionString) {
tempBlobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
const credentials = parseConnectionString(customConfig.connectionString)
accountName = credentials.accountName
accountKey = credentials.accountKey
} else if (customConfig.accountName && customConfig.accountKey) {
const sharedKeyCredential = new StorageSharedKeyCredential(
customConfig.accountName,
@@ -180,6 +227,8 @@ export async function getPresignedUrlWithConfig(
`https://${customConfig.accountName}.blob.core.windows.net`,
sharedKeyCredential
)
accountName = customConfig.accountName
accountKey = customConfig.accountKey
} else {
throw new Error(
'Custom blob config must include either connectionString or accountName + accountKey'
@@ -199,13 +248,7 @@ export async function getPresignedUrlWithConfig(
const sasToken = generateBlobSASQueryParameters(
sasOptions,
new StorageSharedKeyCredential(
customConfig.accountName,
customConfig.accountKey ??
(() => {
throw new Error('Account key is required when using account name authentication')
})()
)
new StorageSharedKeyCredential(accountName, accountKey)
).toString()
return `${blockBlobClient.url}?${sasToken}`
@@ -403,13 +446,9 @@ export async function getMultipartPartUrls(
if (customConfig) {
if (customConfig.connectionString) {
blobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
const match = customConfig.connectionString.match(/AccountName=([^;]+)/)
if (!match) throw new Error('Cannot extract account name from connection string')
accountName = match[1]
const keyMatch = customConfig.connectionString.match(/AccountKey=([^;]+)/)
if (!keyMatch) throw new Error('Cannot extract account key from connection string')
accountKey = keyMatch[1]
const credentials = parseConnectionString(customConfig.connectionString)
accountName = credentials.accountName
accountKey = credentials.accountKey
} else if (customConfig.accountName && customConfig.accountKey) {
const credential = new StorageSharedKeyCredential(
customConfig.accountName,
@@ -428,12 +467,9 @@ export async function getMultipartPartUrls(
} else {
blobServiceClient = await getBlobServiceClient()
containerName = BLOB_CONFIG.containerName
accountName = BLOB_CONFIG.accountName
accountKey =
BLOB_CONFIG.accountKey ||
(() => {
throw new Error('AZURE_ACCOUNT_KEY is required')
})()
const credentials = getAccountCredentials()
accountName = credentials.accountName
accountKey = credentials.accountKey
}
const containerClient = blobServiceClient.getContainerClient(containerName)
@@ -501,12 +537,10 @@ export async function completeMultipartUpload(
const containerClient = blobServiceClient.getContainerClient(containerName)
const blockBlobClient = containerClient.getBlockBlobClient(key)
// Sort parts by part number and extract block IDs
const sortedBlockIds = parts
.sort((a, b) => a.partNumber - b.partNumber)
.map((part) => part.blockId)
// Commit the block list to create the final blob
await blockBlobClient.commitBlockList(sortedBlockIds, {
metadata: {
multipartUpload: 'completed',
@@ -557,10 +591,8 @@ export async function abortMultipartUpload(key: string, customConfig?: BlobConfi
const blockBlobClient = containerClient.getBlockBlobClient(key)
try {
// Delete the blob if it exists (this also cleans up any uncommitted blocks)
await blockBlobClient.deleteIfExists()
} catch (error) {
// Ignore errors since we're just cleaning up
logger.warn('Error cleaning up multipart upload:', error)
}
}

View File

@@ -618,13 +618,6 @@ export function getToolOutputs(
}
}
/**
* Generates output paths for a tool-based block.
*
* @param blockConfig - The block configuration containing tools config
* @param subBlocks - SubBlock values for tool selection and condition evaluation
* @returns Array of output paths for the tool, or empty array on error
*/
export function getToolOutputPaths(
blockConfig: BlockConfig,
subBlocks?: Record<string, SubBlockWithValue>
@@ -634,12 +627,22 @@ export function getToolOutputPaths(
if (!outputs || Object.keys(outputs).length === 0) return []
if (subBlocks && blockConfig.outputs) {
const filteredBlockOutputs = filterOutputsByCondition(blockConfig.outputs, subBlocks)
const allowedKeys = new Set(Object.keys(filteredBlockOutputs))
const filteredOutputs: Record<string, any> = {}
for (const [key, value] of Object.entries(outputs)) {
if (allowedKeys.has(key)) {
const blockOutput = blockConfig.outputs[key]
if (!blockOutput || typeof blockOutput !== 'object') {
filteredOutputs[key] = value
continue
}
const condition = 'condition' in blockOutput ? blockOutput.condition : undefined
if (condition) {
if (evaluateOutputCondition(condition, subBlocks)) {
filteredOutputs[key] = value
}
} else {
filteredOutputs[key] = value
}
}

View File

@@ -22,6 +22,7 @@ import type {
ContextExtensions,
ExecutionCallbacks,
IterationContext,
SerializableExecutionState,
} from '@/executor/execution/types'
import type { ExecutionResult, NormalizedBlockOutput } from '@/executor/types'
import { hasExecutionResult } from '@/executor/utils/errors'
@@ -40,6 +41,12 @@ export interface ExecuteWorkflowCoreOptions {
abortSignal?: AbortSignal
includeFileBase64?: boolean
base64MaxBytes?: number
stopAfterBlockId?: string
/** Run-from-block mode: execute starting from a specific block using cached upstream outputs */
runFromBlock?: {
startBlockId: string
sourceSnapshot: SerializableExecutionState
}
}
function parseVariableValueByType(value: unknown, type: string): unknown {
@@ -114,6 +121,8 @@ export async function executeWorkflowCore(
abortSignal,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
runFromBlock,
} = options
const { metadata, workflow, input, workflowVariables, selectedOutputs } = snapshot
const { requestId, workflowId, userId, triggerType, executionId, triggerBlockId, useDraftState } =
@@ -297,6 +306,7 @@ export async function executeWorkflowCore(
abortSignal,
includeFileBase64,
base64MaxBytes,
stopAfterBlockId,
}
const executorInstance = new Executor({
@@ -319,10 +329,13 @@ export async function executeWorkflowCore(
}
}
const result = (await executorInstance.execute(
workflowId,
resolvedTriggerBlockId
)) as ExecutionResult
const result = runFromBlock
? ((await executorInstance.executeFromBlock(
workflowId,
runFromBlock.startBlockId,
runFromBlock.sourceSnapshot
)) as ExecutionResult)
: ((await executorInstance.execute(workflowId, resolvedTriggerBlockId)) as ExecutionResult)
// Build trace spans for logging from the full execution result
const { traceSpans, totalDuration } = buildTraceSpans(result)
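A hedged call-site sketch (assuming `baseOptions` carries the usual snapshot and callback fields from ExecuteWorkflowCoreOptions):

await executeWorkflowCore({
  ...baseOptions,
  runFromBlock: {
    startBlockId: 'block-123', // hypothetical block ID
    sourceSnapshot: lastSnapshot, // SerializableExecutionState captured from a prior run
  },
})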

View File

@@ -26,7 +26,7 @@ describe('VariableManager', () => {
it.concurrent('should handle boolean type variables', () => {
expect(VariableManager.parseInputForStorage('true', 'boolean')).toBe(true)
expect(VariableManager.parseInputForStorage('false', 'boolean')).toBe(false)
expect(VariableManager.parseInputForStorage('1', 'boolean')).toBe(true)
expect(VariableManager.parseInputForStorage('1', 'boolean')).toBe(false)
expect(VariableManager.parseInputForStorage('0', 'boolean')).toBe(false)
expect(VariableManager.parseInputForStorage('"true"', 'boolean')).toBe(true)
expect(VariableManager.parseInputForStorage("'false'", 'boolean')).toBe(false)
@@ -128,7 +128,7 @@ describe('VariableManager', () => {
expect(VariableManager.resolveForExecution(false, 'boolean')).toBe(false)
expect(VariableManager.resolveForExecution('true', 'boolean')).toBe(true)
expect(VariableManager.resolveForExecution('false', 'boolean')).toBe(false)
expect(VariableManager.resolveForExecution('1', 'boolean')).toBe(true)
expect(VariableManager.resolveForExecution('1', 'boolean')).toBe(false)
expect(VariableManager.resolveForExecution('0', 'boolean')).toBe(false)
})

View File

@@ -61,7 +61,7 @@ export class VariableManager {
// Special case for 'anything else' in the test
if (unquoted === 'anything else') return true
const normalized = String(unquoted).toLowerCase().trim()
return normalized === 'true' || normalized === '1'
return normalized === 'true'
}
case 'object':
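Per the updated tests above, only an unquoted, case-insensitive 'true' now parses to true:

// VariableManager.parseInputForStorage('true', 'boolean') // true
// VariableManager.parseInputForStorage('1', 'boolean')    // false (previously true)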

View File

@@ -35,4 +35,23 @@ export const useExecutionStore = create<ExecutionState & ExecutionActions>()((se
},
clearRunPath: () => set({ lastRunPath: new Map(), lastRunEdges: new Map() }),
reset: () => set(initialState),
setLastExecutionSnapshot: (workflowId, snapshot) => {
const { lastExecutionSnapshots } = get()
const newSnapshots = new Map(lastExecutionSnapshots)
newSnapshots.set(workflowId, snapshot)
set({ lastExecutionSnapshots: newSnapshots })
},
getLastExecutionSnapshot: (workflowId) => {
const { lastExecutionSnapshots } = get()
return lastExecutionSnapshots.get(workflowId)
},
clearLastExecutionSnapshot: (workflowId) => {
const { lastExecutionSnapshots } = get()
const newSnapshots = new Map(lastExecutionSnapshots)
newSnapshots.delete(workflowId)
set({ lastExecutionSnapshots: newSnapshots })
},
}))
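Usage sketch (hypothetical call site) for the per-workflow snapshot map:

const store = useExecutionStore.getState()
store.setLastExecutionSnapshot(workflowId, snapshot)
const cached = store.getLastExecutionSnapshot(workflowId) // undefined if the workflow never ran
store.clearLastExecutionSnapshot(workflowId)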

View File

@@ -1,4 +1,5 @@
import type { Executor } from '@/executor'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { ExecutionContext } from '@/executor/types'
/**
@@ -18,16 +19,9 @@ export interface ExecutionState {
pendingBlocks: string[]
executor: Executor | null
debugContext: ExecutionContext | null
/**
* Tracks blocks from the last execution run and their success/error status.
* Cleared when a new run starts. Used to show run path indicators (rings on blocks).
*/
lastRunPath: Map<string, BlockRunStatus>
/**
* Tracks edges from the last execution run and their success/error status.
* Cleared when a new run starts. Used to show run path indicators on edges.
*/
lastRunEdges: Map<string, EdgeRunStatus>
lastExecutionSnapshots: Map<string, SerializableExecutionState>
}
export interface ExecutionActions {
@@ -41,6 +35,9 @@ export interface ExecutionActions {
setEdgeRunStatus: (edgeId: string, status: EdgeRunStatus) => void
clearRunPath: () => void
reset: () => void
setLastExecutionSnapshot: (workflowId: string, snapshot: SerializableExecutionState) => void
getLastExecutionSnapshot: (workflowId: string) => SerializableExecutionState | undefined
clearLastExecutionSnapshot: (workflowId: string) => void
}
export const initialState: ExecutionState = {
@@ -52,4 +49,5 @@ export const initialState: ExecutionState = {
debugContext: null,
lastRunPath: new Map(),
lastRunEdges: new Map(),
lastExecutionSnapshots: new Map(),
}

View File

@@ -27,6 +27,9 @@ export function registerEmitFunctions(
emitSubblockUpdate = subblockEmit
emitVariableUpdate = variableEmit
currentRegisteredWorkflowId = workflowId
if (workflowId) {
useOperationQueueStore.getState().processNextOperation()
}
}
let currentRegisteredWorkflowId: string | null = null
@@ -262,16 +265,14 @@ export const useOperationQueueStore = create<OperationQueueState>((set, get) =>
return
}
const nextOperation = currentRegisteredWorkflowId
? state.operations.find(
(op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
)
: state.operations.find((op) => op.status === 'pending')
if (!nextOperation) {
if (!currentRegisteredWorkflowId) {
return
}
if (currentRegisteredWorkflowId && nextOperation.workflowId !== currentRegisteredWorkflowId) {
const nextOperation = state.operations.find(
(op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
)
if (!nextOperation) {
return
}

View File

@@ -38,11 +38,12 @@ export const storageUploadTool: ToolConfig<
visibility: 'user-or-llm',
description: 'Optional folder path (e.g., "folder/subfolder/")',
},
fileContent: {
type: 'string',
fileData: {
type: 'json',
required: true,
visibility: 'user-or-llm',
description: 'The file content (base64 encoded for binary files, or plain text)',
description:
'File to upload: a UserFile object (basic mode) or string content (advanced mode: base64 or plain text). Supports data URLs.',
},
contentType: {
type: 'string',
@@ -65,65 +66,28 @@ export const storageUploadTool: ToolConfig<
},
request: {
url: (params) => {
// Combine folder path and fileName, ensuring proper formatting
let fullPath = params.fileName
if (params.path) {
// Ensure path ends with / and doesn't have double slashes
const folderPath = params.path.endsWith('/') ? params.path : `${params.path}/`
fullPath = `${folderPath}${params.fileName}`
}
return `https://${params.projectId}.supabase.co/storage/v1/object/${params.bucket}/${fullPath}`
},
url: '/api/tools/supabase/storage-upload',
method: 'POST',
headers: (params) => {
const headers: Record<string, string> = {
apikey: params.apiKey,
Authorization: `Bearer ${params.apiKey}`,
}
if (params.contentType) {
headers['Content-Type'] = params.contentType
}
if (params.upsert) {
headers['x-upsert'] = 'true'
}
return headers
},
body: (params) => {
// Return the file content wrapped in an object
// The actual upload will need to handle this appropriately
return {
content: params.fileContent,
}
},
},
transformResponse: async (response: Response) => {
let data
try {
data = await response.json()
} catch (parseError) {
throw new Error(`Failed to parse Supabase storage upload response: ${parseError}`)
}
return {
success: true,
output: {
message: 'Successfully uploaded file to storage',
results: data,
},
error: undefined,
}
headers: () => ({
'Content-Type': 'application/json',
}),
body: (params) => ({
projectId: params.projectId,
apiKey: params.apiKey,
bucket: params.bucket,
fileName: params.fileName,
path: params.path,
fileData: params.fileData,
contentType: params.contentType,
upsert: params.upsert,
}),
},
outputs: {
message: { type: 'string', description: 'Operation status message' },
results: {
type: 'object',
description: 'Upload result including file path and metadata',
description: 'Upload result including file path, bucket, and public URL',
},
},
}

View File

@@ -136,7 +136,7 @@ export interface SupabaseStorageUploadParams {
bucket: string
fileName: string
path?: string
fileContent: string
fileData: any // UserFile object (basic mode) or string (advanced mode: base64/plain text)
contentType?: string
upsert?: boolean
}

View File

@@ -52,7 +52,7 @@ services:
deploy:
resources:
limits:
memory: 8G
memory: 1G
healthcheck:
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
interval: 90s

View File

@@ -56,7 +56,7 @@ services:
deploy:
resources:
limits:
memory: 8G
memory: 1G
healthcheck:
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
interval: 90s

View File

@@ -42,7 +42,7 @@ services:
deploy:
resources:
limits:
memory: 4G
memory: 1G
environment:
- DATABASE_URL=postgresql://${POSTGRES_USER:-postgres}:${POSTGRES_PASSWORD:-postgres}@db:5432/${POSTGRES_DB:-simstudio}
- NEXT_PUBLIC_APP_URL=${NEXT_PUBLIC_APP_URL:-http://localhost:3000}

View File

@@ -10,13 +10,13 @@ global:
app:
enabled: true
replicaCount: 2
resources:
limits:
memory: "6Gi"
memory: "8Gi"
cpu: "2000m"
requests:
memory: "4Gi"
memory: "6Gi"
cpu: "1000m"
# Production URLs (REQUIRED - update with your actual domain names)
@@ -49,14 +49,14 @@ app:
realtime:
enabled: true
replicaCount: 2
resources:
limits:
memory: "4Gi"
cpu: "1000m"
requests:
memory: "2Gi"
memory: "1Gi"
cpu: "500m"
requests:
memory: "512Mi"
cpu: "250m"
env:
NEXT_PUBLIC_APP_URL: "https://sim.acme.ai"

View File

@@ -0,0 +1,25 @@
{{- if .Values.branding.enabled }}
---
# Branding ConfigMap
# Mounts custom branding assets (logos, CSS, etc.) into the application
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "sim.fullname" . }}-branding
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: branding
{{- if .Values.branding.files }}
data:
{{- range $key, $value := .Values.branding.files }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if .Values.branding.binaryFiles }}
binaryData:
{{- range $key, $value := .Values.branding.binaryFiles }}
{{ $key }}: {{ $value }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -110,8 +110,13 @@ spec:
{{- end }}
{{- include "sim.resources" .Values.app | nindent 10 }}
{{- include "sim.securityContext" .Values.app | nindent 10 }}
{{- if or .Values.extraVolumeMounts .Values.app.extraVolumeMounts }}
{{- if or .Values.branding.enabled .Values.extraVolumeMounts .Values.app.extraVolumeMounts }}
volumeMounts:
{{- if .Values.branding.enabled }}
- name: branding
mountPath: {{ .Values.branding.mountPath | default "/app/public/branding" }}
readOnly: true
{{- end }}
{{- with .Values.extraVolumeMounts }}
{{- toYaml . | nindent 12 }}
{{- end }}
@@ -119,8 +124,13 @@ spec:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
{{- if or .Values.extraVolumes .Values.app.extraVolumes }}
{{- if or .Values.branding.enabled .Values.extraVolumes .Values.app.extraVolumes }}
volumes:
{{- if .Values.branding.enabled }}
- name: branding
configMap:
name: {{ include "sim.fullname" . }}-branding
{{- end }}
{{- with .Values.extraVolumes }}
{{- toYaml . | nindent 8 }}
{{- end }}

View File

@@ -29,10 +29,10 @@ app:
# Resource limits and requests
resources:
limits:
memory: "4Gi"
memory: "8Gi"
cpu: "2000m"
requests:
memory: "2Gi"
memory: "4Gi"
cpu: "1000m"
# Node selector for pod scheduling (leave empty to allow scheduling on any node)
@@ -232,24 +232,24 @@ app:
realtime:
# Enable/disable the realtime service
enabled: true
# Image configuration
image:
repository: simstudioai/realtime
tag: latest
pullPolicy: Always
# Number of replicas
replicaCount: 1
# Resource limits and requests
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
memory: "1Gi"
cpu: "500m"
requests:
memory: "512Mi"
cpu: "250m"
# Node selector for pod scheduling (leave empty to allow scheduling on any node)
nodeSelector: {}
@@ -738,6 +738,32 @@ sharedStorage:
extraVolumes: []
extraVolumeMounts: []
# Branding configuration
# Use this to inject custom branding assets (logos, CSS, etc.) into the application
branding:
# Enable/disable branding ConfigMap
enabled: false
# Mount path in the container where branding files will be available
mountPath: "/app/public/branding"
# Text files (CSS, JSON, HTML, etc.) - values are plain text
# Example:
# files:
# custom.css: |
# .logo { background-color: #ff0000; }
# config.json: |
# {"theme": "dark"}
files: {}
# Binary files (PNG, JPG, ICO, etc.) - values must be base64 encoded
# Generate base64 with: base64 -i logo.png | tr -d '\n'
# Example:
# binaryFiles:
# logo.png: "iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAYAAAAfFcSJAAAADUlEQVR42mNk..."
# favicon.ico: "AAABAAEAEBAAAAEAIABoBAAAFgAAAA..."
binaryFiles: {}
# Additional environment variables for custom integrations
extraEnvVars: []

View File

@@ -197,7 +197,7 @@ async function getCommitsBetweenVersions(
const commitEntries = gitLog.split('\n').filter((line) => line.trim())
const nonVersionCommits = commitEntries.filter((line) => {
const [hash, message] = line.split('|')
const [, message] = line.split('|')
const isVersionCommit = message.match(/^v\d+\.\d+/)
if (isVersionCommit) {
console.log(`⏭️ Skipping version commit: ${message.substring(0, 50)}...`)
@@ -369,6 +369,25 @@ async function main() {
console.log(` No previous version found (this might be the first release)`)
}
try {
const existingRelease = await octokit.rest.repos.getReleaseByTag({
owner: REPO_OWNER,
repo: REPO_NAME,
tag: targetVersion,
})
if (existingRelease.data) {
console.log(` Release ${targetVersion} already exists, skipping creation`)
console.log(
`🔗 View release: https://github.com/${REPO_OWNER}/${REPO_NAME}/releases/tag/${targetVersion}`
)
return
}
} catch (error: any) {
if (error.status !== 404) {
throw error
}
}
const releaseBody = await generateReleaseBody(versionCommit, previousCommit || undefined)
console.log(`🚀 Creating GitHub release for ${targetVersion}...`)