Compare commits
36 Commits
feat/run-f...v0.5.69
| Author | SHA1 | Date |
|---|---|---|
| | 2b74a2626a | |
| | e9c4251c1c | |
| | cc2be33d6b | |
| | 45371e521e | |
| | 0ce0f98aa5 | |
| | dff1c9d083 | |
| | b09f683072 | |
| | a8bb0db660 | |
| | af82820a28 | |
| | 4372841797 | |
| | 5e8c843241 | |
| | 7bf3d73ee6 | |
| | 7ffc11a738 | |
| | be578e2ed7 | |
| | f415e5edc4 | |
| | 13a6e6c3fa | |
| | f5ab7f21ae | |
| | bfb6fffe38 | |
| | 4fbec0a43f | |
| | 585f5e365b | |
| | 3792bdd252 | |
| | eb5d1f3e5b | |
| | 54ab82c8dd | |
| | f895bf469b | |
| | dd3209af06 | |
| | b6ba3b50a7 | |
| | b304233062 | |
| | 57e4b49bd6 | |
| | e12dd204ed | |
| | 3d9d9cbc54 | |
| | 0f4ec962ad | |
| | 4827866f9a | |
| | 3e697d9ed9 | |
| | 4431a1a484 | |
| | 4d1a9a3f22 | |
| | eb07a080fb | |
```diff
@@ -44,7 +44,7 @@ services:
     deploy:
       resources:
         limits:
-          memory: 1G
+          memory: 4G
     environment:
       - NODE_ENV=development
       - DATABASE_URL=postgresql://postgres:postgres@db:5432/simstudio
```
.github/workflows/ci.yml (vendored, 35 lines changed)
```diff
@@ -10,9 +10,6 @@ concurrency:
   group: ci-${{ github.ref }}
   cancel-in-progress: false
 
-permissions:
-  contents: read
-
 jobs:
   test-build:
     name: Test and Build
@@ -30,11 +27,10 @@ jobs:
     steps:
       - name: Extract version from commit message
         id: extract
-        env:
-          COMMIT_MSG: ${{ github.event.head_commit.message }}
         run: |
+          COMMIT_MSG="${{ github.event.head_commit.message }}"
           # Only tag versions on main branch
-          if [ "$GITHUB_REF" = "refs/heads/main" ] && [[ "$COMMIT_MSG" =~ ^(v[0-9]+\.[0-9]+\.[0-9]+): ]]; then
+          if [ "${{ github.ref }}" = "refs/heads/main" ] && [[ "$COMMIT_MSG" =~ ^(v[0-9]+\.[0-9]+\.[0-9]+): ]]; then
            VERSION="${BASH_REMATCH[1]}"
            echo "version=${VERSION}" >> $GITHUB_OUTPUT
            echo "is_release=true" >> $GITHUB_OUTPUT
```
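The version-extraction step above gates releases on a `vX.Y.Z:` prefix in the head-commit subject, and only on `main`. A minimal TypeScript sketch of that rule (hypothetical helper for illustration, not code from this repo):

```ts
// Mirrors the workflow's bash check: only commits on main whose message
// starts with "vX.Y.Z:" produce a release version.
function extractReleaseVersion(commitMsg: string, ref: string): string | null {
  if (ref !== 'refs/heads/main') return null // only tag versions on main
  const match = commitMsg.match(/^(v\d+\.\d+\.\d+):/)
  return match ? match[1] : null
}

console.log(extractReleaseVersion('v0.5.69: release', 'refs/heads/main')) // "v0.5.69"
console.log(extractReleaseVersion('fix: typo', 'refs/heads/main')) // null
```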
```diff
@@ -281,30 +277,3 @@ jobs:
     if: needs.check-docs-changes.outputs.docs_changed == 'true'
     uses: ./.github/workflows/docs-embeddings.yml
     secrets: inherit
-
-  # Create GitHub Release (only for version commits on main, after all builds complete)
-  create-release:
-    name: Create GitHub Release
-    runs-on: blacksmith-4vcpu-ubuntu-2404
-    needs: [create-ghcr-manifests, detect-version]
-    if: needs.detect-version.outputs.is_release == 'true'
-    permissions:
-      contents: write
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Setup Bun
-        uses: oven-sh/setup-bun@v2
-        with:
-          bun-version: latest
-
-      - name: Install dependencies
-        run: bun install --frozen-lockfile
-
-      - name: Create release
-        env:
-          GH_PAT: ${{ secrets.GITHUB_TOKEN }}
-        run: bun run scripts/create-single-release.ts ${{ needs.detect-version.outputs.version }}
```
.github/workflows/docs-embeddings.yml (vendored, 3 lines changed)

```diff
@@ -4,9 +4,6 @@ on:
   workflow_call:
   workflow_dispatch: # Allow manual triggering
 
-permissions:
-  contents: read
-
 jobs:
   process-docs-embeddings:
     name: Process Documentation Embeddings
```
.github/workflows/migrations.yml (vendored, 3 lines changed)

```diff
@@ -4,9 +4,6 @@ on:
   workflow_call:
   workflow_dispatch:
 
-permissions:
-  contents: read
-
 jobs:
   migrate:
     name: Apply Database Migrations
```
.github/workflows/publish-cli.yml (vendored, 3 lines changed)

```diff
@@ -6,9 +6,6 @@ on:
     paths:
       - 'packages/cli/**'
 
-permissions:
-  contents: read
-
 jobs:
   publish-npm:
     runs-on: blacksmith-4vcpu-ubuntu-2404
```
.github/workflows/publish-python-sdk.yml (vendored, 3 lines changed)

```diff
@@ -6,9 +6,6 @@ on:
     paths:
       - 'packages/python-sdk/**'
 
-permissions:
-  contents: write
-
 jobs:
   publish-pypi:
     runs-on: blacksmith-4vcpu-ubuntu-2404
```
.github/workflows/publish-ts-sdk.yml (vendored, 3 lines changed)

```diff
@@ -6,9 +6,6 @@ on:
     paths:
       - 'packages/ts-sdk/**'
 
-permissions:
-  contents: write
-
 jobs:
   publish-npm:
     runs-on: blacksmith-4vcpu-ubuntu-2404
```
.github/workflows/test-build.yml (vendored, 3 lines changed)

```diff
@@ -4,9 +4,6 @@ on:
   workflow_call:
   workflow_dispatch:
 
-permissions:
-  contents: read
-
 jobs:
   test-build:
     name: Test and Build
```
```diff
@@ -119,19 +119,6 @@ aside#nd-sidebar {
   }
 }
 
-/* Hide TOC popover on tablet/medium screens (768px - 1279px) */
-/* Keeps it visible on mobile (<768px) for easy navigation */
-/* Desktop (>=1280px) already hides it via fumadocs xl:hidden */
-@media (min-width: 768px) and (max-width: 1279px) {
-  #nd-docs-layout {
-    --fd-toc-popover-height: 0px !important;
-  }
-
-  [data-toc-popover] {
-    display: none !important;
-  }
-}
-
 /* Desktop only: Apply custom navbar offset, sidebar width and margin offsets */
 /* On mobile, let fumadocs handle the layout natively */
 @media (min-width: 1024px) {
```
@@ -1,38 +0,0 @@ (file deleted)

```tsx
'use client'

import { getAssetUrl } from '@/lib/utils'

interface ActionImageProps {
  src: string
  alt: string
}

interface ActionVideoProps {
  src: string
  alt: string
}

export function ActionImage({ src, alt }: ActionImageProps) {
  return (
    <img
      src={src}
      alt={alt}
      className='inline-block w-full max-w-[200px] rounded border border-neutral-200 dark:border-neutral-700'
    />
  )
}

export function ActionVideo({ src, alt }: ActionVideoProps) {
  const resolvedSrc = getAssetUrl(src)

  return (
    <video
      src={resolvedSrc}
      autoPlay
      loop
      muted
      playsInline
      className='inline-block w-full max-w-[200px] rounded border border-neutral-200 dark:border-neutral-700'
    />
  )
}
```
```diff
@@ -10,20 +10,12 @@ Stellen Sie Sim auf Ihrer eigenen Infrastruktur mit Docker oder Kubernetes berei
 
 ## Anforderungen
 
-| Ressource | Klein | Standard | Produktion |
-|----------|-------|----------|------------|
-| CPU | 2 Kerne | 4 Kerne | 8+ Kerne |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Speicher | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Neueste Version |
-
-**Klein**: Entwicklung, Tests, Einzelnutzer (1-5 Nutzer)
-**Standard**: Teams (5-50 Nutzer), moderate Arbeitslasten
-**Produktion**: Große Teams (50+ Nutzer), Hochverfügbarkeit, intensive Workflow-Ausführung
-
-<Callout type="info">
-  Die Ressourcenanforderungen werden durch Workflow-Ausführung (isolated-vm Sandboxing), Dateiverarbeitung (In-Memory-Dokumentenparsing) und Vektoroperationen (pgvector) bestimmt. Arbeitsspeicher ist typischerweise der limitierende Faktor, nicht CPU. Produktionsdaten zeigen, dass die Hauptanwendung durchschnittlich 4-8 GB und bei hoher Last bis zu 12 GB benötigt.
-</Callout>
+| Ressource | Minimum | Empfohlen |
+|----------|---------|-------------|
+| CPU | 2 Kerne | 4+ Kerne |
+| RAM | 12 GB | 16+ GB |
+| Speicher | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Neueste Version |
 
 ## Schnellstart
```
````diff
@@ -5,24 +5,44 @@ title: Copilot
 import { Callout } from 'fumadocs-ui/components/callout'
 import { Card, Cards } from 'fumadocs-ui/components/card'
 import { Image } from '@/components/ui/image'
-import { MessageCircle, Hammer, Zap, Globe, Paperclip, History, RotateCcw, Brain } from 'lucide-react'
+import { MessageCircle, Package, Zap, Infinity as InfinityIcon, Brain, BrainCircuit } from 'lucide-react'
 
-Copilot is your in-editor assistant that helps you build and edit workflows. It can:
+Copilot is your in-editor assistant that helps you build and edit workflows with Sim Copilot, as well as understand and improve them. It can:
 
 - **Explain**: Answer questions about Sim and your current workflow
 - **Guide**: Suggest edits and best practices
 - **Build**: Add blocks, wire connections, and configure settings
 - **Debug**: Analyze execution issues and optimize performance
 - **Edit**: Make changes to blocks, connections, and settings when you approve
 
 <Callout type="info">
-  Copilot is a Sim-managed service. For self-hosted deployments:
-  1. Go to [sim.ai](https://sim.ai) → Settings → Copilot and generate a Copilot API key
-  2. Set `COPILOT_API_KEY` in your self-hosted environment
+  Copilot is a Sim-managed service. For self-hosted deployments, generate a Copilot API key in the hosted app (sim.ai → Settings → Copilot)
+  2. Set `COPILOT_API_KEY` in your self-hosted environment to that value
 </Callout>
 
-## Modes
+## Context Menu (@)
 
-Switch between modes using the mode selector at the bottom of the input area.
+Use the `@` symbol to reference various resources and give Copilot more context about your workspace:
+
+<Image
+  src="/static/copilot/copilot-menu.png"
+  alt="Copilot context menu showing available reference options"
+  width={600}
+  height={400}
+/>
+
+The `@` menu provides access to:
+- **Chats**: Reference previous copilot conversations
+- **All workflows**: Reference any workflow in your workspace
+- **Workflow Blocks**: Reference specific blocks from workflows
+- **Blocks**: Reference block types and templates
+- **Knowledge**: Reference your uploaded documents and knowledgebase
+- **Docs**: Reference Sim documentation
+- **Templates**: Reference workflow templates
+- **Logs**: Reference execution logs and results
+
+This contextual information helps Copilot provide more accurate and relevant assistance for your specific use case.
+
+## Modes
 
 <Cards>
   <Card
@@ -40,153 +60,113 @@ Switch between modes using the mode selector at the bottom of the input area.
   <Card
     title={
       <span className="inline-flex items-center gap-2">
-        <Hammer className="h-4 w-4 text-muted-foreground" />
-        Build
+        <Package className="h-4 w-4 text-muted-foreground" />
+        Agent
       </span>
     }
   >
     <div className="m-0 text-sm">
-      Workflow building mode. Copilot can add blocks, wire connections, edit configurations, and debug issues.
+      Build-and-edit mode. Copilot proposes specific edits (add blocks, wire variables, tweak settings) and applies them when you approve.
     </div>
   </Card>
 </Cards>
 
-## Models
+<div className="flex justify-center">
+  <Image
+    src="/static/copilot/copilot-mode.png"
+    alt="Copilot mode selection interface"
+    width={600}
+    height={400}
+    className="my-6"
+  />
+</div>
 
-Select your preferred AI model using the model selector at the bottom right of the input area.
+## Depth Levels
 
-**Available Models:**
-- Claude 4.5 Opus, Sonnet (default), Haiku
-- GPT 5.2 Codex, Pro
-- Gemini 3 Pro
+<Cards>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <Zap className="h-4 w-4 text-muted-foreground" />
+        Fast
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Quickest and cheapest. Best for small edits, simple workflows, and minor tweaks.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <InfinityIcon className="h-4 w-4 text-muted-foreground" />
+        Auto
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Balanced speed and reasoning. Recommended default for most tasks.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <Brain className="h-4 w-4 text-muted-foreground" />
+        Advanced
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">More reasoning for larger workflows and complex edits while staying performant.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <BrainCircuit className="h-4 w-4 text-muted-foreground" />
+        Behemoth
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Maximum reasoning for deep planning, debugging, and complex architectural changes.</div>
+  </Card>
+</Cards>
 
-Choose based on your needs: faster models for simple tasks, more capable models for complex workflows.
+### Mode Selection Interface
 
-## Context Menu (@)
+You can easily switch between different reasoning modes using the mode selector in the Copilot interface:
 
-Use the `@` symbol to reference resources and give Copilot more context:
+<Image
+  src="/static/copilot/copilot-models.png"
+  alt="Copilot mode selection showing Advanced mode with MAX toggle"
+  width={600}
+  height={300}
+/>
 
-| Reference | Description |
-|-----------|-------------|
-| **Chats** | Previous copilot conversations |
-| **Workflows** | Any workflow in your workspace |
-| **Workflow Blocks** | Blocks in the current workflow |
-| **Blocks** | Block types and templates |
-| **Knowledge** | Uploaded documents and knowledge bases |
-| **Docs** | Sim documentation |
-| **Templates** | Workflow templates |
-| **Logs** | Execution logs and results |
+The interface allows you to:
+- **Select reasoning level**: Choose from Fast, Auto, Advanced, or Behemoth
+- **Enable MAX mode**: Toggle for maximum reasoning capabilities when you need the most thorough analysis
+- **See mode descriptions**: Understand what each mode is optimized for
 
-Type `@` in the input field to open the context menu, then search or browse to find what you need.
+Choose your mode based on the complexity of your task - use Fast for simple questions and Behemoth for complex architectural changes.
 
-## Slash Commands (/)
+## Billing and Cost Calculation
 
-Use slash commands for quick actions:
+### How Costs Are Calculated
 
-| Command | Description |
-|---------|-------------|
-| `/fast` | Fast mode execution |
-| `/research` | Research and exploration mode |
-| `/actions` | Execute agent actions |
+Copilot usage is billed per token from the underlying LLM:
 
-**Web Commands:**
+- **Input tokens**: billed at the provider's base rate (**at-cost**)
+- **Output tokens**: billed at **1.5×** the provider's base output rate
 
-| Command | Description |
-|---------|-------------|
-| `/search` | Search the web |
-| `/read` | Read a specific URL |
-| `/scrape` | Scrape web page content |
-| `/crawl` | Crawl multiple pages |
+```javascript
+copilotCost = (inputTokens × inputPrice + outputTokens × (outputPrice × 1.5)) / 1,000,000
+```
 
-Type `/` in the input field to see available commands.
+| Component | Rate Applied |
+|-----------|--------------|
+| Input | inputPrice |
+| Output | outputPrice × 1.5 |
 
 ## Chat Management
 
 ### Starting a New Chat
 
 Click the **+** button in the Copilot header to start a fresh conversation.
 
 ### Chat History
 
 Click **History** to view previous conversations grouped by date. You can:
 - Click a chat to resume it
 - Delete chats you no longer need
 
 ### Editing Messages
 
 Hover over any of your messages and click **Edit** to modify and resend it. This is useful for refining your prompts.
 
 ### Message Queue
 
 If you send a message while Copilot is still responding, it gets queued. You can:
 - View queued messages in the expandable queue panel
 - Send a queued message immediately (aborts current response)
 - Remove messages from the queue
 
 ## File Attachments
 
 Click the attachment icon to upload files with your message. Supported file types include:
 - Images (preview thumbnails shown)
 - PDFs
 - Text files, JSON, XML
 - Other document formats
 
 Files are displayed as clickable thumbnails that open in a new tab.
 
 ## Checkpoints & Changes
 
 When Copilot makes changes to your workflow, it saves checkpoints so you can revert if needed.
 
 ### Viewing Checkpoints
 
 Hover over a Copilot message and click the checkpoints icon to see saved workflow states for that message.
 
 ### Reverting Changes
 
 Click **Revert** on any checkpoint to restore your workflow to that state. A confirmation dialog will warn that this action cannot be undone.
 
 ### Accepting Changes
 
 When Copilot proposes changes, you can:
 - **Accept**: Apply the proposed changes (`Mod+Shift+Enter`)
 - **Reject**: Dismiss the changes and keep your current workflow
 
 ## Thinking Blocks
 
 For complex requests, Copilot may show its reasoning process in expandable thinking blocks:
 
 - Blocks auto-expand while Copilot is thinking
 - Click to manually expand/collapse
 - Shows duration of the thinking process
 - Helps you understand how Copilot arrived at its solution
 
 ## Options Selection
 
 When Copilot presents multiple options, you can select using:
 
 | Control | Action |
 |---------|--------|
 | **1-9** | Select option by number |
 | **Arrow Up/Down** | Navigate between options |
 | **Enter** | Select highlighted option |
 
 Selected options are highlighted; unselected options appear struck through.
 
 ## Keyboard Shortcuts
 
 | Shortcut | Action |
 |----------|--------|
 | `@` | Open context menu |
 | `/` | Open slash commands |
 | `Arrow Up/Down` | Navigate menu items |
 | `Enter` | Select menu item |
 | `Esc` | Close menus |
 | `Mod+Shift+Enter` | Accept Copilot changes |
 
 ## Usage Limits
 
 Copilot usage is billed per token from the underlying LLM. If you reach your usage limit, Copilot will prompt you to increase your limit. You can add usage in increments ($50, $100) from your current base.
 
 <Callout type="warning">
   Pricing shown reflects rates as of September 4, 2025. Check provider documentation for current pricing.
 </Callout>
 
 <Callout type="info">
-  See the [Cost Calculation page](/execution/costs) for billing details.
+  Model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. See <a href="/execution/costs">the Cost Calculation page</a> for background and examples.
 </Callout>
````
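The billing formula in the Copilot docs above charges input tokens at the provider's base rate and output tokens at 1.5x that rate, per million tokens. A short TypeScript sketch with placeholder prices (illustrative only, not real provider rates):

```ts
// Implements the docs' formula:
// copilotCost = (inputTokens * inputPrice + outputTokens * outputPrice * 1.5) / 1_000_000
// Prices are per million tokens; the numbers below are placeholders, not real rates.
function copilotCost(
  inputTokens: number,
  outputTokens: number,
  inputPrice: number,
  outputPrice: number
): number {
  return (inputTokens * inputPrice + outputTokens * outputPrice * 1.5) / 1_000_000
}

// Example: 10k input and 2k output tokens at $3/M in, $15/M out
// => (10000*3 + 2000*15*1.5) / 1e6 = $0.075
console.log(copilotCost(10_000, 2_000, 3, 15)) // 0.075
```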
```diff
@@ -34,8 +34,6 @@ Speed up your workflow building with these keyboard shortcuts and mouse controls
 | `Mod` + `V` | Paste blocks |
 | `Delete` or `Backspace` | Delete selected blocks or edges |
 | `Shift` + `L` | Auto-layout canvas |
 | `Mod` + `Shift` + `F` | Fit to view |
 | `Mod` + `Shift` + `Enter` | Accept Copilot changes |
 
 ## Panel Navigation
```
@@ -3,7 +3,6 @@
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
|
||||
@@ -1,375 +0,0 @@ (file deleted)

```mdx
---
title: Quick Reference
description: Essential actions for navigating and using the Sim workflow editor
---

import { Callout } from 'fumadocs-ui/components/callout'
import { ActionImage, ActionVideo } from '@/components/ui/action-media'

A quick lookup for everyday actions in the Sim workflow editor. For keyboard shortcuts, see [Keyboard Shortcuts](/keyboard-shortcuts).

<Callout type="info">
  **Mod** refers to `Cmd` on macOS and `Ctrl` on Windows/Linux.
</Callout>

## Workspaces

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Create a workspace</td>
      <td>Click workspace dropdown → **New Workspace**</td>
      <td><ActionVideo src="quick-reference/create-workspace.mp4" alt="Create workspace" /></td>
    </tr>
    <tr>
      <td>Switch workspaces</td>
      <td>Click workspace dropdown → Select workspace</td>
      <td><ActionVideo src="quick-reference/switch-workspace.mp4" alt="Switch workspaces" /></td>
    </tr>
    <tr>
      <td>Invite team members</td>
      <td>Sidebar → **Invite**</td>
      <td><ActionVideo src="quick-reference/invite.mp4" alt="Invite team members" /></td>
    </tr>
    <tr>
      <td>Rename a workspace</td>
      <td>Right-click workspace → **Rename**</td>
      <td rowSpan={4}><ActionImage src="/static/quick-reference/workspace-context-menu.png" alt="Workspace context menu" /></td>
    </tr>
    <tr>
      <td>Duplicate a workspace</td>
      <td>Right-click workspace → **Duplicate**</td>
    </tr>
    <tr>
      <td>Export a workspace</td>
      <td>Right-click workspace → **Export**</td>
    </tr>
    <tr>
      <td>Delete a workspace</td>
      <td>Right-click workspace → **Delete**</td>
    </tr>
  </tbody>
</table>

## Workflows

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Create a workflow</td>
      <td>Click **+** button in sidebar</td>
      <td><ActionImage src="/static/quick-reference/create-workflow.png" alt="Create workflow" /></td>
    </tr>
    <tr>
      <td>Reorder / move workflows</td>
      <td>Drag workflow up/down or onto a folder</td>
      <td><ActionVideo src="quick-reference/reordering.mp4" alt="Reorder workflows" /></td>
    </tr>
    <tr>
      <td>Import a workflow</td>
      <td>Click import button in sidebar → Select file</td>
      <td><ActionImage src="/static/quick-reference/import-workflow.png" alt="Import workflow" /></td>
    </tr>
    <tr>
      <td>Multi-select workflows</td>
      <td>`Mod+Click` or `Shift+Click` workflows in sidebar</td>
      <td><ActionVideo src="quick-reference/multiselect.mp4" alt="Multi-select workflows" /></td>
    </tr>
    <tr>
      <td>Open in new tab</td>
      <td>Right-click workflow → **Open in New Tab**</td>
      <td rowSpan={6}><ActionImage src="/static/quick-reference/workflow-context-menu.png" alt="Workflow context menu" /></td>
    </tr>
    <tr>
      <td>Rename a workflow</td>
      <td>Right-click workflow → **Rename**</td>
    </tr>
    <tr>
      <td>Assign workflow color</td>
      <td>Right-click workflow → **Change Color**</td>
    </tr>
    <tr>
      <td>Duplicate a workflow</td>
      <td>Right-click workflow → **Duplicate**</td>
    </tr>
    <tr>
      <td>Export a workflow</td>
      <td>Right-click workflow → **Export**</td>
    </tr>
    <tr>
      <td>Delete a workflow</td>
      <td>Right-click workflow → **Delete**</td>
    </tr>
    <tr>
      <td>Rename a folder</td>
      <td>Right-click folder → **Rename**</td>
      <td rowSpan={6}><ActionImage src="/static/quick-reference/folder-context-menu.png" alt="Folder context menu" /></td>
    </tr>
    <tr>
      <td>Create workflow in folder</td>
      <td>Right-click folder → **Create workflow**</td>
    </tr>
    <tr>
      <td>Create folder in folder</td>
      <td>Right-click folder → **Create folder**</td>
    </tr>
    <tr>
      <td>Duplicate a folder</td>
      <td>Right-click folder → **Duplicate**</td>
    </tr>
    <tr>
      <td>Export a folder</td>
      <td>Right-click folder → **Export**</td>
    </tr>
    <tr>
      <td>Delete a folder</td>
      <td>Right-click folder → **Delete**</td>
    </tr>
  </tbody>
</table>

## Blocks

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Add a block</td>
      <td>Drag from Toolbar panel, or right-click canvas → **Add Block**</td>
      <td><ActionVideo src="quick-reference/add-block.mp4" alt="Add a block" /></td>
    </tr>
    <tr>
      <td>Multi-select blocks</td>
      <td>`Mod+Click` additional blocks, or shift-drag to draw selection box</td>
      <td><ActionVideo src="quick-reference/multiselect-blocks.mp4" alt="Multi-select blocks" /></td>
    </tr>
    <tr>
      <td>Copy blocks</td>
      <td>`Mod+C` with blocks selected</td>
      <td rowSpan={2}><ActionVideo src="quick-reference/copy-paste.mp4" alt="Copy and paste blocks" /></td>
    </tr>
    <tr>
      <td>Paste blocks</td>
      <td>`Mod+V` to paste copied blocks</td>
    </tr>
    <tr>
      <td>Duplicate blocks</td>
      <td>Right-click → **Duplicate**</td>
      <td><ActionVideo src="quick-reference/duplicate-block.mp4" alt="Duplicate blocks" /></td>
    </tr>
    <tr>
      <td>Delete blocks</td>
      <td>`Delete` or `Backspace` key, or right-click → **Delete**</td>
      <td><ActionImage src="/static/quick-reference/delete-block.png" alt="Delete block" /></td>
    </tr>
    <tr>
      <td>Rename a block</td>
      <td>Click block name in header, or edit in the Editor panel</td>
      <td><ActionVideo src="quick-reference/rename-block.mp4" alt="Rename a block" /></td>
    </tr>
    <tr>
      <td>Enable/Disable a block</td>
      <td>Right-click → **Enable/Disable**</td>
      <td><ActionImage src="/static/quick-reference/disable-block.png" alt="Disable block" /></td>
    </tr>
    <tr>
      <td>Toggle handle orientation</td>
      <td>Right-click → **Toggle Handles**</td>
      <td><ActionVideo src="quick-reference/toggle-handles.mp4" alt="Toggle handle orientation" /></td>
    </tr>
    <tr>
      <td>Configure a block</td>
      <td>Select block → use Editor panel on right</td>
      <td><ActionVideo src="quick-reference/configure-block.mp4" alt="Configure a block" /></td>
    </tr>
  </tbody>
</table>

## Connections

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Create a connection</td>
      <td>Drag from output handle to input handle</td>
      <td><ActionVideo src="quick-reference/connect-blocks.mp4" alt="Connect blocks" /></td>
    </tr>
    <tr>
      <td>Delete a connection</td>
      <td>Click edge to select → `Delete` key</td>
      <td><ActionVideo src="quick-reference/delete-connection.mp4" alt="Delete connection" /></td>
    </tr>
    <tr>
      <td>Use output in another block</td>
      <td>Drag connection tag into input field</td>
      <td><ActionVideo src="quick-reference/connection-tag.mp4" alt="Use connection tag" /></td>
    </tr>
  </tbody>
</table>

## Panels & Views

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Search toolbar</td>
      <td>`Mod+F`</td>
      <td><ActionVideo src="quick-reference/search-toolbar.mp4" alt="Search toolbar" /></td>
    </tr>
    <tr>
      <td>Search everything</td>
      <td>`Mod+K`</td>
      <td><ActionImage src="/static/quick-reference/search-everything.png" alt="Search everything" /></td>
    </tr>
    <tr>
      <td>Toggle manual mode</td>
      <td>Click toggle button to switch between manual and selector</td>
      <td><ActionImage src="/static/quick-reference/toggle-manual-mode.png" alt="Toggle manual mode" /></td>
    </tr>
    <tr>
      <td>Collapse/expand sidebar</td>
      <td>Click collapse button on sidebar</td>
      <td><ActionVideo src="quick-reference/collapse-sidebar.mp4" alt="Collapse sidebar" /></td>
    </tr>
  </tbody>
</table>

## Running & Testing

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Run workflow</td>
      <td>Click Run Workflow button or `Mod+Enter`</td>
      <td><ActionImage src="/static/quick-reference/run-workflow.png" alt="Run workflow" /></td>
    </tr>
    <tr>
      <td>Stop workflow</td>
      <td>Click Stop button or `Mod+Enter` while running</td>
      <td><ActionImage src="/static/quick-reference/stop-workflow.png" alt="Stop workflow" /></td>
    </tr>
    <tr>
      <td>Test with chat</td>
      <td>Use Chat panel on the right side</td>
      <td><ActionImage src="/static/quick-reference/test-chat.png" alt="Test with chat" /></td>
    </tr>
    <tr>
      <td>Select output to view</td>
      <td>Click dropdown in Chat panel → Select block output</td>
      <td><ActionImage src="/static/quick-reference/output-select.png" alt="Select output to view" /></td>
    </tr>
    <tr>
      <td>Clear chat history</td>
      <td>Click clear button in Chat panel</td>
      <td><ActionImage src="/static/quick-reference/clear-chat.png" alt="Clear chat history" /></td>
    </tr>
    <tr>
      <td>View execution logs</td>
      <td>Open terminal panel at bottom, or `Mod+L`</td>
      <td><ActionImage src="/static/quick-reference/terminal.png" alt="Execution logs terminal" /></td>
    </tr>
    <tr>
      <td>Filter logs by block or status</td>
      <td>Click block filter in terminal or right-click log entry → **Filter by Block** or **Filter by Status**</td>
      <td><ActionImage src="/static/quick-reference/filter-block.png" alt="Filter logs by block" /></td>
    </tr>
    <tr>
      <td>Search logs</td>
      <td>Use search field in terminal or right-click log entry → **Search**</td>
      <td><ActionImage src="/static/quick-reference/terminal-search.png" alt="Search logs" /></td>
    </tr>
    <tr>
      <td>Copy log entry</td>
      <td>Clipboard Icon or Right-click log entry → **Copy**</td>
      <td><ActionImage src="/static/quick-reference/copy-log.png" alt="Copy log entry" /></td>
    </tr>
    <tr>
      <td>Clear terminal</td>
      <td>Trash icon or `Mod+D`</td>
      <td><ActionImage src="/static/quick-reference/clear-terminal.png" alt="Clear terminal" /></td>
    </tr>
  </tbody>
</table>

## Deployment

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Deploy a workflow</td>
      <td>Click **Deploy** button in panel</td>
      <td><ActionImage src="/static/quick-reference/deploy.png" alt="Deploy workflow" /></td>
    </tr>
    <tr>
      <td>Update deployment</td>
      <td>Click **Update** when changes are detected</td>
      <td><ActionImage src="/static/quick-reference/update-deployment.png" alt="Update deployment" /></td>
    </tr>
    <tr>
      <td>View deployment status</td>
      <td>Check status indicator (Live/Update/Deploy) in Deploy tab</td>
      <td><ActionImage src="/static/quick-reference/view-deployment.png" alt="View deployment status" /></td>
    </tr>
    <tr>
      <td>Revert deployment</td>
      <td>Access previous versions in Deploy tab → **Promote to live**</td>
      <td><ActionImage src="/static/quick-reference/promote-deployment.png" alt="Promote deployment to live" /></td>
    </tr>
    <tr>
      <td>Copy API endpoint</td>
      <td>Deploy tab → API → Copy API cURL</td>
      <td><ActionImage src="/static/quick-reference/copy-api.png" alt="Copy API endpoint" /></td>
    </tr>
  </tbody>
</table>

## Variables

<table>
  <thead>
    <tr><th>Action</th><th>How</th><th>Preview</th></tr>
  </thead>
  <tbody>
    <tr>
      <td>Add / Edit / Delete workflow variable</td>
      <td>Panel -> Variables -> **Add Variable**, click to edit, or delete icon</td>
      <td><ActionImage src="/static/quick-reference/variables.png" alt="Variables panel" /></td>
    </tr>
    <tr>
      <td>Add environment variable</td>
      <td>Settings → **Environment Variables** → **Add**</td>
      <td><ActionImage src="/static/quick-reference/add-env-variable.png" alt="Add environment variable" /></td>
    </tr>
    <tr>
      <td>Reference a workflow variable</td>
      <td>Use `<blockName.itemName>` syntax in block inputs</td>
      <td><ActionImage src="/static/quick-reference/variable-reference.png" alt="Reference workflow variable" /></td>
    </tr>
    <tr>
      <td>Reference an environment variable</td>
      <td>Use `{{ENV_VAR}}` syntax in block inputs</td>
      <td><ActionImage src="/static/quick-reference/env-variable-reference.png" alt="Reference environment variable" /></td>
    </tr>
  </tbody>
</table>
```
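The removed quick-reference page documents two reference syntaxes: `<blockName.itemName>` for block outputs and `{{ENV_VAR}}` for environment variables. A rough sketch of how such references could be substituted (hypothetical resolver for illustration; Sim's actual resolver lives in the executor):

```ts
// Hypothetical resolver for the two reference syntaxes described above.
// Unresolvable references are left untouched rather than erased.
function resolveReferences(
  input: string,
  blocks: Record<string, Record<string, string>>,
  env: Record<string, string>
): string {
  // <blockName.itemName> -> value from a block's output
  let out = input.replace(/<(\w+)\.(\w+)>/g, (m, block, item) => blocks[block]?.[item] ?? m)
  // {{ENV_VAR}} -> environment variable
  out = out.replace(/\{\{(\w+)\}\}/g, (m, name) => env[name] ?? m)
  return out
}

console.log(
  resolveReferences(
    'Bearer {{API_TOKEN}} for <agent1.content>',
    { agent1: { content: 'summary' } },
    { API_TOKEN: 'abc' }
  )
) // "Bearer abc for summary"
```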
```diff
@@ -16,20 +16,12 @@ Deploy Sim on your own infrastructure with Docker or Kubernetes.
 
 ## Requirements
 
-| Resource | Small | Standard | Production |
-|----------|-------|----------|------------|
-| CPU | 2 cores | 4 cores | 8+ cores |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Storage | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Latest |
-
-**Small**: Development, testing, single user (1-5 users)
-**Standard**: Teams (5-50 users), moderate workloads
-**Production**: Large teams (50+ users), high availability, heavy workflow execution
-
-<Callout type="info">
-  Resource requirements are driven by workflow execution (isolated-vm sandboxing), file processing (in-memory document parsing), and vector operations (pgvector). Memory is typically the constraining factor rather than CPU. Production telemetry shows the main app uses 4-8 GB average with peaks up to 12 GB under heavy load.
-</Callout>
+| Resource | Minimum | Recommended |
+|----------|---------|-------------|
+| CPU | 2 cores | 4+ cores |
+| RAM | 12 GB | 16+ GB |
+| Storage | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Latest |
 
 ## Quick Start
```
```diff
@@ -10,20 +10,12 @@ Despliega Sim en tu propia infraestructura con Docker o Kubernetes.
 
 ## Requisitos
 
-| Recurso | Pequeño | Estándar | Producción |
-|----------|---------|----------|------------|
-| CPU | 2 núcleos | 4 núcleos | 8+ núcleos |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| Almacenamiento | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | Última versión |
-
-**Pequeño**: Desarrollo, pruebas, usuario único (1-5 usuarios)
-**Estándar**: Equipos (5-50 usuarios), cargas de trabajo moderadas
-**Producción**: Equipos grandes (50+ usuarios), alta disponibilidad, ejecución intensiva de workflows
-
-<Callout type="info">
-  Los requisitos de recursos están determinados por la ejecución de workflows (sandboxing isolated-vm), procesamiento de archivos (análisis de documentos en memoria) y operaciones vectoriales (pgvector). La memoria suele ser el factor limitante, no la CPU. La telemetría de producción muestra que la aplicación principal usa 4-8 GB en promedio con picos de hasta 12 GB bajo carga pesada.
-</Callout>
+| Recurso | Mínimo | Recomendado |
+|----------|---------|-------------|
+| CPU | 2 núcleos | 4+ núcleos |
+| RAM | 12 GB | 16+ GB |
+| Almacenamiento | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | Última versión |
 
 ## Inicio rápido
```
```diff
@@ -10,20 +10,12 @@ Déployez Sim sur votre propre infrastructure avec Docker ou Kubernetes.
 
 ## Prérequis
 
-| Ressource | Petit | Standard | Production |
-|----------|-------|----------|------------|
-| CPU | 2 cœurs | 4 cœurs | 8+ cœurs |
-| RAM | 12 Go | 16 Go | 32+ Go |
-| Stockage | 20 Go SSD | 50 Go SSD | 100+ Go SSD |
-| Docker | 20.10+ | 20.10+ | Dernière version |
-
-**Petit** : Développement, tests, utilisateur unique (1-5 utilisateurs)
-**Standard** : Équipes (5-50 utilisateurs), charges de travail modérées
-**Production** : Grandes équipes (50+ utilisateurs), haute disponibilité, exécution intensive de workflows
-
-<Callout type="info">
-  Les besoins en ressources sont déterminés par l'exécution des workflows (sandboxing isolated-vm), le traitement des fichiers (analyse de documents en mémoire) et les opérations vectorielles (pgvector). La mémoire est généralement le facteur limitant, pas le CPU. La télémétrie de production montre que l'application principale utilise 4-8 Go en moyenne avec des pics jusqu'à 12 Go sous forte charge.
-</Callout>
+| Ressource | Minimum | Recommandé |
+|----------|---------|-------------|
+| CPU | 2 cœurs | 4+ cœurs |
+| RAM | 12 Go | 16+ Go |
+| Stockage | 20 Go SSD | 50+ Go SSD |
+| Docker | 20.10+ | Dernière version |
 
 ## Démarrage rapide
```
```diff
@@ -10,20 +10,12 @@ DockerまたはKubernetesを使用して、自社のインフラストラクチ
 
 ## 要件
 
-| リソース | スモール | スタンダード | プロダクション |
-|----------|---------|-------------|----------------|
-| CPU | 2コア | 4コア | 8+コア |
-| RAM | 12 GB | 16 GB | 32+ GB |
-| ストレージ | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | 最新版 |
-
-**スモール**: 開発、テスト、シングルユーザー(1-5ユーザー)
-**スタンダード**: チーム(5-50ユーザー)、中程度のワークロード
-**プロダクション**: 大規模チーム(50+ユーザー)、高可用性、高負荷ワークフロー実行
-
-<Callout type="info">
-  リソース要件は、ワークフロー実行(isolated-vmサンドボックス)、ファイル処理(メモリ内ドキュメント解析)、ベクトル演算(pgvector)によって決まります。CPUよりもメモリが制約要因となることが多いです。本番環境のテレメトリによると、メインアプリは平均4-8 GB、高負荷時は最大12 GBを使用します。
-</Callout>
+| リソース | 最小 | 推奨 |
+|----------|---------|-------------|
+| CPU | 2コア | 4+コア |
+| RAM | 12 GB | 16+ GB |
+| ストレージ | 20 GB SSD | 50+ GB SSD |
+| Docker | 20.10+ | 最新版 |
 
 ## クイックスタート
```
```diff
@@ -10,20 +10,12 @@ import { Callout } from 'fumadocs-ui/components/callout'
 
 ## 要求
 
-| 资源 | 小型 | 标准 | 生产环境 |
-|----------|------|------|----------|
-| CPU | 2 核 | 4 核 | 8+ 核 |
-| 内存 | 12 GB | 16 GB | 32+ GB |
-| 存储 | 20 GB SSD | 50 GB SSD | 100+ GB SSD |
-| Docker | 20.10+ | 20.10+ | 最新版本 |
-
-**小型**: 开发、测试、单用户(1-5 用户)
-**标准**: 团队(5-50 用户)、中等工作负载
-**生产环境**: 大型团队(50+ 用户)、高可用性、密集工作流执行
-
-<Callout type="info">
-  资源需求由工作流执行(isolated-vm 沙箱)、文件处理(内存中文档解析)和向量运算(pgvector)决定。内存通常是限制因素,而不是 CPU。生产遥测数据显示,主应用平均使用 4-8 GB,高负载时峰值可达 12 GB。
-</Callout>
+| 资源 | 最低要求 | 推荐配置 |
+|----------|---------|-------------|
+| CPU | 2 核 | 4 核及以上 |
+| 内存 | 12 GB | 16 GB 及以上 |
+| 存储 | 20 GB SSD | 50 GB 及以上 SSD |
+| Docker | 20.10+ | 最新版本 |
 
 ## 快速开始
```
[29 image files deleted; previous sizes 5.9 KiB to 146 KiB]
```diff
@@ -31,7 +31,6 @@ const DEFAULT_ENABLED_MODELS: Record<CopilotModelId, boolean> = {
   'claude-4.5-opus': true,
   'claude-4.1-opus': false,
   'gemini-3-pro': true,
   'auto': true,
 }
 
 // GET - Fetch user's enabled models
```
```diff
@@ -8,7 +8,6 @@ import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
 import { CodeLanguage, DEFAULT_CODE_LANGUAGE, isValidCodeLanguage } from '@/lib/execution/languages'
 import { escapeRegExp, normalizeName, REFERENCE } from '@/executor/constants'
 import { type OutputSchema, resolveBlockReference } from '@/executor/utils/block-reference'
-import { formatLiteralForCode } from '@/executor/utils/code-formatting'
 import {
   createEnvVarPattern,
   createWorkflowVariablePattern,
```
|
||||
if (type === 'number') {
|
||||
variableValue = Number(variableValue)
|
||||
} else if (type === 'boolean') {
|
||||
if (typeof variableValue === 'boolean') {
|
||||
// Already a boolean, keep as-is
|
||||
} else {
|
||||
const normalized = String(variableValue).toLowerCase().trim()
|
||||
variableValue = normalized === 'true'
|
||||
}
|
||||
variableValue = variableValue === 'true' || variableValue === true
|
||||
} else if (type === 'json' && typeof variableValue === 'string') {
|
||||
try {
|
||||
variableValue = JSON.parse(variableValue)
|
||||
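The hunk above replaces the multi-step boolean normalization with a single expression. The two agree for `true` and `'true'`, but the old path also lowercased and trimmed strings before comparing; a small sketch of the difference (for illustration only):

```ts
// Old behaviour: case-insensitive, trimmed string comparison
function oldBool(v: unknown): boolean {
  if (typeof v === 'boolean') return v
  return String(v).toLowerCase().trim() === 'true'
}

// New behaviour: strict match on the boolean true or the exact string 'true'
function newBool(v: unknown): boolean {
  return v === 'true' || v === true
}

console.log(oldBool(' TRUE '), newBool(' TRUE ')) // true false  <- the one divergence
console.log(oldBool('true'), newBool('true'))     // true true
```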
```diff
@@ -693,7 +687,11 @@ export async function POST(req: NextRequest) {
     prologue += `const environmentVariables = JSON.parse(${JSON.stringify(JSON.stringify(envVars))});\n`
     prologueLineCount++
     for (const [k, v] of Object.entries(contextVariables)) {
-      prologue += `const ${k} = ${formatLiteralForCode(v, 'javascript')};\n`
+      if (v === undefined) {
+        prologue += `const ${k} = undefined;\n`
+      } else {
+        prologue += `const ${k} = JSON.parse(${JSON.stringify(JSON.stringify(v))});\n`
+      }
       prologueLineCount++
     }
```
```diff
@@ -764,7 +762,11 @@ export async function POST(req: NextRequest) {
     prologue += `environmentVariables = json.loads(${JSON.stringify(JSON.stringify(envVars))})\n`
     prologueLineCount++
     for (const [k, v] of Object.entries(contextVariables)) {
-      prologue += `${k} = ${formatLiteralForCode(v, 'python')}\n`
+      if (v === undefined) {
+        prologue += `${k} = None\n`
+      } else {
+        prologue += `${k} = json.loads(${JSON.stringify(JSON.stringify(v))})\n`
+      }
       prologueLineCount++
     }
     const wrapped = [
```
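Both prologue hunks switch from formatting values into generated source to embedding them as JSON that the sandbox parses at runtime. The double `JSON.stringify` yields a source-safe string literal whose contents are JSON text; a minimal sketch of why that round-trips cleanly:

```ts
// Inner stringify turns the value into JSON text; the outer one escapes that
// text as a valid source-code string literal, so quotes and newlines in the
// value can never break out of the generated line.
const v = { note: 'has "quotes" and\nnewlines' }
const line = `const ctx = JSON.parse(${JSON.stringify(JSON.stringify(v))});`
console.log(line)
// const ctx = JSON.parse("{\"note\":\"has \\\"quotes\\\" and\\nnewlines\"}");
// Evaluating that line reconstructs the original object exactly.
```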
```diff
@@ -408,7 +408,6 @@ describe('Knowledge Search Utils', () => {
         input: ['test query'],
         model: 'text-embedding-3-small',
         encoding_format: 'float',
         dimensions: 1536,
       }),
     })
   )
```
apps/sim/app/api/organizations/[id]/workspaces/route.ts (new file, 204 lines)

```ts
import { db } from '@sim/db'
import { member, permissions, user, workspace } from '@sim/db/schema'
import { createLogger } from '@sim/logger'
import { and, eq, or } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'

const logger = createLogger('OrganizationWorkspacesAPI')

/**
 * GET /api/organizations/[id]/workspaces
 * Get workspaces related to the organization with optional filtering
 * Query parameters:
 * - ?available=true - Only workspaces where user can invite others (admin permissions)
 * - ?member=userId - Workspaces where specific member has access
 */
export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  try {
    const session = await getSession()

    if (!session?.user?.id) {
      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
    }

    const { id: organizationId } = await params
    const url = new URL(request.url)
    const availableOnly = url.searchParams.get('available') === 'true'
    const memberId = url.searchParams.get('member')

    // Verify user is a member of this organization
    const memberEntry = await db
      .select()
      .from(member)
      .where(and(eq(member.organizationId, organizationId), eq(member.userId, session.user.id)))
      .limit(1)

    if (memberEntry.length === 0) {
      return NextResponse.json(
        {
          error: 'Forbidden - Not a member of this organization',
        },
        { status: 403 }
      )
    }

    const userRole = memberEntry[0].role
    const hasAdminAccess = ['owner', 'admin'].includes(userRole)

    if (availableOnly) {
      // Get workspaces where user has admin permissions (can invite others)
      const availableWorkspaces = await db
        .select({
          id: workspace.id,
          name: workspace.name,
          ownerId: workspace.ownerId,
          createdAt: workspace.createdAt,
          isOwner: eq(workspace.ownerId, session.user.id),
          permissionType: permissions.permissionType,
        })
        .from(workspace)
        .leftJoin(
          permissions,
          and(
            eq(permissions.entityType, 'workspace'),
            eq(permissions.entityId, workspace.id),
            eq(permissions.userId, session.user.id)
          )
        )
        .where(
          or(
            // User owns the workspace
            eq(workspace.ownerId, session.user.id),
            // User has admin permission on the workspace
            and(
              eq(permissions.userId, session.user.id),
              eq(permissions.entityType, 'workspace'),
              eq(permissions.permissionType, 'admin')
            )
          )
        )

      // Filter and format the results
      const workspacesWithInvitePermission = availableWorkspaces
        .filter((workspace) => {
          // Include if user owns the workspace OR has admin permission
          return workspace.isOwner || workspace.permissionType === 'admin'
        })
        .map((workspace) => ({
          id: workspace.id,
          name: workspace.name,
          isOwner: workspace.isOwner,
          canInvite: true, // All returned workspaces have invite permission
          createdAt: workspace.createdAt,
        }))

      logger.info('Retrieved available workspaces for organization member', {
        organizationId,
        userId: session.user.id,
        workspaceCount: workspacesWithInvitePermission.length,
      })

      return NextResponse.json({
        success: true,
        data: {
          workspaces: workspacesWithInvitePermission,
          totalCount: workspacesWithInvitePermission.length,
          filter: 'available',
        },
      })
    }

    if (memberId && hasAdminAccess) {
      // Get workspaces where specific member has access (admin only)
      const memberWorkspaces = await db
        .select({
          id: workspace.id,
          name: workspace.name,
          ownerId: workspace.ownerId,
          isOwner: eq(workspace.ownerId, memberId),
          permissionType: permissions.permissionType,
          createdAt: permissions.createdAt,
        })
        .from(workspace)
        .leftJoin(
          permissions,
          and(
            eq(permissions.entityType, 'workspace'),
            eq(permissions.entityId, workspace.id),
            eq(permissions.userId, memberId)
          )
        )
        .where(
          or(
            // Member owns the workspace
            eq(workspace.ownerId, memberId),
            // Member has permissions on the workspace
            and(eq(permissions.userId, memberId), eq(permissions.entityType, 'workspace'))
          )
        )

      const formattedWorkspaces = memberWorkspaces.map((workspace) => ({
        id: workspace.id,
        name: workspace.name,
        isOwner: workspace.isOwner,
        permission: workspace.permissionType,
        joinedAt: workspace.createdAt,
        createdAt: workspace.createdAt,
      }))

      return NextResponse.json({
        success: true,
        data: {
          workspaces: formattedWorkspaces,
          totalCount: formattedWorkspaces.length,
          filter: 'member',
          memberId,
        },
      })
    }

    // Default: Get all workspaces (basic info only for regular members)
    if (!hasAdminAccess) {
      return NextResponse.json({
        success: true,
        data: {
          workspaces: [],
          totalCount: 0,
          message: 'Workspace access information is only available to organization admins',
        },
      })
    }

    // For admins: Get summary of all workspaces
    const allWorkspaces = await db
      .select({
        id: workspace.id,
        name: workspace.name,
        ownerId: workspace.ownerId,
        createdAt: workspace.createdAt,
        ownerName: user.name,
      })
      .from(workspace)
      .leftJoin(user, eq(workspace.ownerId, user.id))

    return NextResponse.json({
      success: true,
      data: {
        workspaces: allWorkspaces,
        totalCount: allWorkspaces.length,
        filter: 'all',
      },
      userRole,
      hasAdminAccess,
    })
  } catch (error) {
    logger.error('Failed to get organization workspaces', { error })
    return NextResponse.json(
      {
        error: 'Internal server error',
      },
      { status: 500 }
    )
  }
}
```
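A sketch of how a client might call the new endpoint (URL shape taken from the route's doc comment; cookie-based session auth assumed, matching the `getSession()` check):

```ts
// Fetch workspaces where the current user can invite others.
// Error handling kept minimal; the route returns { success, data: { workspaces } }.
async function getInvitableWorkspaces(orgId: string) {
  const res = await fetch(`/api/organizations/${orgId}/workspaces?available=true`, {
    credentials: 'include', // session cookie, as required by the route
  })
  if (!res.ok) throw new Error(`Request failed: ${res.status}`)
  const { data } = await res.json()
  return data.workspaces as Array<{ id: string; name: string; canInvite: boolean }>
}
```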
@@ -1,257 +0,0 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { checkInternalAuth } from '@/lib/auth/hybrid'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { processSingleFileToUserFile } from '@/lib/uploads/utils/file-utils'
|
||||
import { downloadFileFromStorage } from '@/lib/uploads/utils/file-utils.server'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
const logger = createLogger('SupabaseStorageUploadAPI')
|
||||
|
||||
const SupabaseStorageUploadSchema = z.object({
|
||||
projectId: z.string().min(1, 'Project ID is required'),
|
||||
apiKey: z.string().min(1, 'API key is required'),
|
||||
bucket: z.string().min(1, 'Bucket name is required'),
|
||||
fileName: z.string().min(1, 'File name is required'),
|
||||
path: z.string().optional().nullable(),
|
||||
fileData: z.any(),
|
||||
contentType: z.string().optional().nullable(),
|
||||
upsert: z.boolean().optional().default(false),
|
||||
})
|
||||
|
||||
export async function POST(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
|
||||
try {
|
||||
const authResult = await checkInternalAuth(request, { requireWorkflowId: false })
|
||||
|
||||
if (!authResult.success) {
|
||||
logger.warn(
|
||||
`[${requestId}] Unauthorized Supabase storage upload attempt: ${authResult.error}`
|
||||
)
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: authResult.error || 'Authentication required',
|
||||
},
|
||||
{ status: 401 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(
|
||||
`[${requestId}] Authenticated Supabase storage upload request via ${authResult.authType}`,
|
||||
{
|
||||
userId: authResult.userId,
|
||||
}
|
||||
)
|
||||
|
||||
const body = await request.json()
|
||||
const validatedData = SupabaseStorageUploadSchema.parse(body)
|
||||
|
||||
const fileData = validatedData.fileData
|
||||
const isStringInput = typeof fileData === 'string'
|
||||
|
||||
logger.info(`[${requestId}] Uploading to Supabase Storage`, {
|
||||
bucket: validatedData.bucket,
|
||||
fileName: validatedData.fileName,
|
||||
path: validatedData.path,
|
||||
fileDataType: isStringInput ? 'string' : 'object',
|
||||
})
|
||||
|
||||
if (!fileData) {
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'fileData is required',
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
let uploadBody: Buffer
|
||||
let uploadContentType: string | undefined
|
||||
|
||||
if (isStringInput) {
|
||||
let content = fileData as string
|
||||
|
||||
      const dataUrlMatch = content.match(/^data:([^;]+);base64,(.+)$/s)
      if (dataUrlMatch) {
        const [, mimeType, base64Data] = dataUrlMatch
        content = base64Data
        if (!validatedData.contentType) {
          uploadContentType = mimeType
        }
        logger.info(`[${requestId}] Extracted base64 from data URL (MIME: ${mimeType})`)
      }

      const cleanedContent = content.replace(/[\s\r\n]/g, '')
      const isLikelyBase64 = /^[A-Za-z0-9+/]*={0,2}$/.test(cleanedContent)

      if (isLikelyBase64 && cleanedContent.length >= 4) {
        try {
          uploadBody = Buffer.from(cleanedContent, 'base64')

          const expectedMinSize = Math.floor(cleanedContent.length * 0.7)
          const expectedMaxSize = Math.ceil(cleanedContent.length * 0.8)

          if (
            uploadBody.length >= expectedMinSize &&
            uploadBody.length <= expectedMaxSize &&
            uploadBody.length > 0
          ) {
            logger.info(
              `[${requestId}] Decoded base64 content: ${cleanedContent.length} chars -> ${uploadBody.length} bytes`
            )
          } else {
            const reEncoded = uploadBody.toString('base64')
            if (reEncoded !== cleanedContent) {
              logger.info(
                `[${requestId}] Content looked like base64 but re-encoding didn't match, using as plain text`
              )
              uploadBody = Buffer.from(content, 'utf-8')
            } else {
              logger.info(
                `[${requestId}] Decoded base64 content (verified): ${uploadBody.length} bytes`
              )
            }
          }
        } catch (decodeError) {
          logger.info(
            `[${requestId}] Failed to decode as base64, using as plain text: ${decodeError}`
          )
          uploadBody = Buffer.from(content, 'utf-8')
        }
      } else {
        uploadBody = Buffer.from(content, 'utf-8')
        logger.info(`[${requestId}] Using content as plain text (${uploadBody.length} bytes)`)
      }

      uploadContentType =
        uploadContentType || validatedData.contentType || 'application/octet-stream'
    } else {
      const rawFile = fileData
      logger.info(`[${requestId}] Processing file object: ${rawFile.name || 'unknown'}`)

      let userFile
      try {
        userFile = processSingleFileToUserFile(rawFile, requestId, logger)
      } catch (error) {
        return NextResponse.json(
          {
            success: false,
            error: error instanceof Error ? error.message : 'Failed to process file',
          },
          { status: 400 }
        )
      }

      const buffer = await downloadFileFromStorage(userFile, requestId, logger)

      uploadBody = buffer
      uploadContentType = validatedData.contentType || userFile.type || 'application/octet-stream'
    }

    let fullPath = validatedData.fileName
    if (validatedData.path) {
      const folderPath = validatedData.path.endsWith('/')
        ? validatedData.path
        : `${validatedData.path}/`
      fullPath = `${folderPath}${validatedData.fileName}`
    }

    const supabaseUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/${validatedData.bucket}/${fullPath}`

    const headers: Record<string, string> = {
      apikey: validatedData.apiKey,
      Authorization: `Bearer ${validatedData.apiKey}`,
      'Content-Type': uploadContentType,
    }

    if (validatedData.upsert) {
      headers['x-upsert'] = 'true'
    }

    logger.info(`[${requestId}] Sending to Supabase: ${supabaseUrl}`, {
      contentType: uploadContentType,
      bodySize: uploadBody.length,
      upsert: validatedData.upsert,
    })

    const response = await fetch(supabaseUrl, {
      method: 'POST',
      headers,
      body: new Uint8Array(uploadBody),
    })

    if (!response.ok) {
      const errorText = await response.text()
      let errorData
      try {
        errorData = JSON.parse(errorText)
      } catch {
        errorData = { message: errorText }
      }

      logger.error(`[${requestId}] Supabase Storage upload failed:`, {
        status: response.status,
        statusText: response.statusText,
        error: errorData,
      })

      return NextResponse.json(
        {
          success: false,
          error: errorData.message || errorData.error || `Upload failed: ${response.statusText}`,
          details: errorData,
        },
        { status: response.status }
      )
    }

    const result = await response.json()

    logger.info(`[${requestId}] File uploaded successfully to Supabase Storage`, {
      bucket: validatedData.bucket,
      path: fullPath,
    })

    const publicUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/public/${validatedData.bucket}/${fullPath}`

    return NextResponse.json({
      success: true,
      output: {
        message: 'Successfully uploaded file to storage',
        results: {
          ...result,
          path: fullPath,
          bucket: validatedData.bucket,
          publicUrl,
        },
      },
    })
  } catch (error) {
    if (error instanceof z.ZodError) {
      logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
      return NextResponse.json(
        {
          success: false,
          error: 'Invalid request data',
          details: error.errors,
        },
        { status: 400 }
      )
    }

    logger.error(`[${requestId}] Error uploading to Supabase Storage:`, error)

    return NextResponse.json(
      {
        success: false,
        error: error instanceof Error ? error.message : 'Internal server error',
      },
      { status: 500 }
    )
  }
}

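A minimal standalone sketch of the decode-and-verify heuristic used in the route above, assuming only Node's Buffer API (the function name is illustrative): base64 decodes to roughly 3/4 of its character count, so the decoded length must land in the 0.7-0.8 ratio band, and an exact re-encode round-trip settles the ambiguous cases.

// Sketch of the decode-and-verify heuristic from the upload route above.
// Assumes Node.js Buffer; the function name is illustrative, not part of the route's API.
function decodeMaybeBase64(content: string): Buffer {
  const cleaned = content.replace(/[\s\r\n]/g, '')
  if (!/^[A-Za-z0-9+/]*={0,2}$/.test(cleaned) || cleaned.length < 4) {
    return Buffer.from(content, 'utf-8') // cannot be base64
  }
  const decoded = Buffer.from(cleaned, 'base64')
  // Valid base64 decodes to ~3/4 of its character count.
  const ratioOk =
    decoded.length >= Math.floor(cleaned.length * 0.7) &&
    decoded.length <= Math.ceil(cleaned.length * 0.8) &&
    decoded.length > 0
  // Round-trip check catches strings that merely look like base64.
  if (ratioOk || decoded.toString('base64') === cleaned) {
    return decoded
  }
  return Buffer.from(content, 'utf-8')
}

The round-trip check is what keeps short plain-text strings that happen to match the base64 alphabet from being silently mangled.
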
@@ -1,377 +0,0 @@
import { db, workflow as workflowTable } from '@sim/db'
import { createLogger } from '@sim/logger'
import { eq } from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { v4 as uuidv4 } from 'uuid'
import { z } from 'zod'
import { checkHybridAuth } from '@/lib/auth/hybrid'
import { generateRequestId } from '@/lib/core/utils/request'
import { SSE_HEADERS } from '@/lib/core/utils/sse'
import { getPersonalAndWorkspaceEnv } from '@/lib/environment/utils'
import { markExecutionCancelled } from '@/lib/execution/cancellation'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { DAGExecutor } from '@/executor/execution/executor'
import type { IterationContext, SerializableExecutionState } from '@/executor/execution/types'
import type { NormalizedBlockOutput } from '@/executor/types'
import { hasExecutionResult } from '@/executor/utils/errors'
import { Serializer } from '@/serializer'
import { mergeSubblockState } from '@/stores/workflows/server-utils'

const logger = createLogger('ExecuteFromBlockAPI')

const ExecuteFromBlockSchema = z.object({
  startBlockId: z.string().min(1, 'Start block ID is required'),
  sourceSnapshot: z.object({
    blockStates: z.record(z.any()),
    executedBlocks: z.array(z.string()),
    blockLogs: z.array(z.any()),
    decisions: z.object({
      router: z.record(z.string()),
      condition: z.record(z.string()),
    }),
    completedLoops: z.array(z.string()),
    loopExecutions: z.record(z.any()).optional(),
    parallelExecutions: z.record(z.any()).optional(),
    parallelBlockMapping: z.record(z.any()).optional(),
    activeExecutionPath: z.array(z.string()),
  }),
})

export const runtime = 'nodejs'
export const dynamic = 'force-dynamic'

/**
 * POST /api/workflows/[id]/execute-from-block
 *
 * Executes a workflow starting from a specific block using cached outputs
 * for upstream/unaffected blocks from the source snapshot.
 */
export async function POST(req: NextRequest, { params }: { params: Promise<{ id: string }> }) {
  const requestId = generateRequestId()
  const { id: workflowId } = await params

  try {
    const auth = await checkHybridAuth(req, { requireWorkflowId: false })
    if (!auth.success || !auth.userId) {
      return NextResponse.json({ error: auth.error || 'Unauthorized' }, { status: 401 })
    }
    const userId = auth.userId

    let body: unknown
    try {
      body = await req.json()
    } catch {
      return NextResponse.json({ error: 'Invalid JSON body' }, { status: 400 })
    }

    const validation = ExecuteFromBlockSchema.safeParse(body)
    if (!validation.success) {
      logger.warn(`[${requestId}] Invalid request body:`, validation.error.errors)
      return NextResponse.json(
        {
          error: 'Invalid request body',
          details: validation.error.errors.map((e) => ({
            path: e.path.join('.'),
            message: e.message,
          })),
        },
        { status: 400 }
      )
    }

    const { startBlockId, sourceSnapshot } = validation.data

    logger.info(`[${requestId}] Starting run-from-block execution`, {
      workflowId,
      userId,
      startBlockId,
      executedBlocksCount: sourceSnapshot.executedBlocks.length,
    })

    const executionId = uuidv4()

    // Load workflow record to get workspaceId
    const [workflowRecord] = await db
      .select({ workspaceId: workflowTable.workspaceId })
      .from(workflowTable)
      .where(eq(workflowTable.id, workflowId))
      .limit(1)

    if (!workflowRecord?.workspaceId) {
      return NextResponse.json({ error: 'Workflow not found or has no workspace' }, { status: 404 })
    }

    const workspaceId = workflowRecord.workspaceId

    // Load workflow state
    const workflowData = await loadWorkflowFromNormalizedTables(workflowId)
    if (!workflowData) {
      return NextResponse.json({ error: 'Workflow state not found' }, { status: 404 })
    }

    const { blocks, edges, loops, parallels } = workflowData

    // Merge block states
    const mergedStates = mergeSubblockState(blocks)

    // Get environment variables
    const { personalDecrypted, workspaceDecrypted } = await getPersonalAndWorkspaceEnv(
      userId,
      workspaceId
    )
    const decryptedEnvVars: Record<string, string> = { ...personalDecrypted, ...workspaceDecrypted }

    // Serialize workflow
    const serializedWorkflow = new Serializer().serializeWorkflow(
      mergedStates,
      edges,
      loops,
      parallels,
      true
    )

    const encoder = new TextEncoder()
    const abortController = new AbortController()
    let isStreamClosed = false

    const stream = new ReadableStream<Uint8Array>({
      async start(controller) {
        const sendEvent = (event: ExecutionEvent) => {
          if (isStreamClosed) return

          try {
            controller.enqueue(encodeSSEEvent(event))
          } catch {
            isStreamClosed = true
          }
        }

        try {
          const startTime = new Date()

          sendEvent({
            type: 'execution:started',
            timestamp: startTime.toISOString(),
            executionId,
            workflowId,
            data: {
              startTime: startTime.toISOString(),
            },
          })

          const onBlockStart = async (
            blockId: string,
            blockName: string,
            blockType: string,
            iterationContext?: IterationContext
          ) => {
            sendEvent({
              type: 'block:started',
              timestamp: new Date().toISOString(),
              executionId,
              workflowId,
              data: {
                blockId,
                blockName,
                blockType,
                ...(iterationContext && {
                  iterationCurrent: iterationContext.iterationCurrent,
                  iterationTotal: iterationContext.iterationTotal,
                  iterationType: iterationContext.iterationType,
                }),
              },
            })
          }

          const onBlockComplete = async (
            blockId: string,
            blockName: string,
            blockType: string,
            callbackData: { input?: unknown; output: NormalizedBlockOutput; executionTime: number },
            iterationContext?: IterationContext
          ) => {
            const hasError = (callbackData.output as any)?.error

            if (hasError) {
              sendEvent({
                type: 'block:error',
                timestamp: new Date().toISOString(),
                executionId,
                workflowId,
                data: {
                  blockId,
                  blockName,
                  blockType,
                  input: callbackData.input,
                  error: (callbackData.output as any).error,
                  durationMs: callbackData.executionTime || 0,
                  ...(iterationContext && {
                    iterationCurrent: iterationContext.iterationCurrent,
                    iterationTotal: iterationContext.iterationTotal,
                    iterationType: iterationContext.iterationType,
                  }),
                },
              })
            } else {
              sendEvent({
                type: 'block:completed',
                timestamp: new Date().toISOString(),
                executionId,
                workflowId,
                data: {
                  blockId,
                  blockName,
                  blockType,
                  input: callbackData.input,
                  output: callbackData.output,
                  durationMs: callbackData.executionTime || 0,
                  ...(iterationContext && {
                    iterationCurrent: iterationContext.iterationCurrent,
                    iterationTotal: iterationContext.iterationTotal,
                    iterationType: iterationContext.iterationType,
                  }),
                },
              })
            }
          }

          const onStream = async (streamingExecution: unknown) => {
            const streamingExec = streamingExecution as { stream: ReadableStream; execution: any }
            const blockId = streamingExec.execution?.blockId

            const reader = streamingExec.stream.getReader()
            const decoder = new TextDecoder()

            try {
              while (true) {
                const { done, value } = await reader.read()
                if (done) break

                const chunk = decoder.decode(value, { stream: true })
                sendEvent({
                  type: 'stream:chunk',
                  timestamp: new Date().toISOString(),
                  executionId,
                  workflowId,
                  data: { blockId, chunk },
                })
              }

              sendEvent({
                type: 'stream:done',
                timestamp: new Date().toISOString(),
                executionId,
                workflowId,
                data: { blockId },
              })
            } catch (error) {
              logger.error(`[${requestId}] Error streaming block content:`, error)
            } finally {
              try {
                reader.releaseLock()
              } catch {}
            }
          }

          // Create executor and run from block
          const executor = new DAGExecutor({
            workflow: serializedWorkflow,
            envVarValues: decryptedEnvVars,
            workflowInput: {},
            workflowVariables: {},
            contextExtensions: {
              stream: true,
              executionId,
              workspaceId,
              userId,
              isDeployedContext: false,
              onBlockStart,
              onBlockComplete,
              onStream,
              abortSignal: abortController.signal,
            },
          })

          const result = await executor.executeFromBlock(
            workflowId,
            startBlockId,
            sourceSnapshot as SerializableExecutionState
          )

          if (result.status === 'cancelled') {
            sendEvent({
              type: 'execution:cancelled',
              timestamp: new Date().toISOString(),
              executionId,
              workflowId,
              data: {
                duration: result.metadata?.duration || 0,
              },
            })
            return
          }

          sendEvent({
            type: 'execution:completed',
            timestamp: new Date().toISOString(),
            executionId,
            workflowId,
            data: {
              success: result.success,
              output: result.output,
              duration: result.metadata?.duration || 0,
              startTime: result.metadata?.startTime || startTime.toISOString(),
              endTime: result.metadata?.endTime || new Date().toISOString(),
            },
          })
        } catch (error: unknown) {
          const errorMessage = error instanceof Error ? error.message : 'Unknown error'
          logger.error(`[${requestId}] Run-from-block execution failed: ${errorMessage}`)

          const executionResult = hasExecutionResult(error) ? error.executionResult : undefined

          sendEvent({
            type: 'execution:error',
            timestamp: new Date().toISOString(),
            executionId,
            workflowId,
            data: {
              error: executionResult?.error || errorMessage,
              duration: executionResult?.metadata?.duration || 0,
            },
          })
        } finally {
          if (!isStreamClosed) {
            try {
              controller.enqueue(encoder.encode('data: [DONE]\n\n'))
              controller.close()
            } catch {
              // Stream already closed
            }
          }
        }
      },
      cancel() {
        isStreamClosed = true
        logger.info(`[${requestId}] Client aborted SSE stream, signalling cancellation`)
        abortController.abort()
        markExecutionCancelled(executionId).catch(() => {})
      },
    })

    return new NextResponse(stream, {
      headers: {
        ...SSE_HEADERS,
        'X-Execution-Id': executionId,
      },
    })
  } catch (error: unknown) {
    const errorMessage = error instanceof Error ? error.message : 'Unknown error'
    logger.error(`[${requestId}] Failed to start run-from-block execution:`, error)
    return NextResponse.json(
      { error: errorMessage || 'Failed to start execution' },
      { status: 500 }
    )
  }
}

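For context, a hedged sketch of how a client might have consumed this endpoint's stream. It assumes the conventional SSE framing (`data: <json>` lines separated by blank lines), which is consistent with the `data: [DONE]` sentinel written in the route's `finally` block; the parsing loop itself is illustrative, not code from the repository.

// Illustrative client for the (now removed) execute-from-block SSE endpoint.
// Assumes standard SSE framing ("data: <json>\n\n") and the [DONE] sentinel above.
async function consumeExecutionStream(workflowId: string, body: unknown): Promise<void> {
  const res = await fetch(`/api/workflows/${workflowId}/execute-from-block`, {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify(body),
  })
  if (!res.ok || !res.body) throw new Error(`Request failed: ${res.status}`)

  const reader = res.body.getReader()
  const decoder = new TextDecoder()
  let buffer = ''
  while (true) {
    const { done, value } = await reader.read()
    if (done) break
    buffer += decoder.decode(value, { stream: true })
    // Each SSE event is separated by a blank line.
    const events = buffer.split('\n\n')
    buffer = events.pop() ?? ''
    for (const raw of events) {
      const data = raw.replace(/^data: /, '')
      if (data === '[DONE]') return
      const event = JSON.parse(data) // an ExecutionEvent, per the route above
      console.log(event.type, event.data)
    }
  }
}
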
@@ -1,12 +1,11 @@
import { memo, useCallback } from 'react'
import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, LogOut, Play } from 'lucide-react'
import { ArrowLeftRight, ArrowUpDown, Circle, CircleOff, LogOut } from 'lucide-react'
import { Button, Copy, Tooltip, Trash2 } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import { isInputDefinitionTrigger } from '@/lib/workflows/triggers/input-definition-triggers'
import { useUserPermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import { validateTriggerPaste } from '@/app/workspace/[workspaceId]/w/[workflowId]/utils'
import { useCollaborativeWorkflow } from '@/hooks/use-collaborative-workflow'
import { useExecutionStore } from '@/stores/execution'
import { useNotificationStore } from '@/stores/notifications'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
@@ -98,42 +97,12 @@ export const ActionBar = memo(
    )
  )

  const { activeWorkflowId } = useWorkflowRegistry()
  const { isExecuting, getLastExecutionSnapshot } = useExecutionStore()
  const userPermissions = useUserPermissionsContext()

  const isStartBlock = isInputDefinitionTrigger(blockType)
  const isResponseBlock = blockType === 'response'
  const isNoteBlock = blockType === 'note'
  const isSubflowBlock = blockType === 'loop' || blockType === 'parallel'
  const isInsideSubflow = parentId && (parentType === 'loop' || parentType === 'parallel')

  // Check if run-from-block is available
  const hasExecutionSnapshot = activeWorkflowId
    ? !!getLastExecutionSnapshot(activeWorkflowId)
    : false
  const wasExecuted = activeWorkflowId
    ? getLastExecutionSnapshot(activeWorkflowId)?.executedBlocks.includes(blockId) ?? false
    : false
  const canRunFromBlock =
    hasExecutionSnapshot &&
    wasExecuted &&
    !isStartBlock &&
    !isNoteBlock &&
    !isSubflowBlock &&
    !isInsideSubflow &&
    !isExecuting

  const handleRunFromBlock = useCallback(() => {
    if (!activeWorkflowId || !canRunFromBlock) return

    // Dispatch a custom event to trigger run-from-block execution
    window.dispatchEvent(
      new CustomEvent('run-from-block', {
        detail: { blockId, workflowId: activeWorkflowId },
      })
    )
  }, [blockId, activeWorkflowId, canRunFromBlock])

  /**
   * Get appropriate tooltip message based on disabled state
@@ -182,7 +151,7 @@ export const ActionBar = memo(
        </Tooltip.Root>
      )}

      {canRunFromBlock && (
      {isSubflowBlock && (
        <Tooltip.Root>
          <Tooltip.Trigger asChild>
            <Button
@@ -190,17 +159,17 @@ export const ActionBar = memo(
              onClick={(e) => {
                e.stopPropagation()
                if (!disabled) {
                  handleRunFromBlock()
                  collaborativeBatchToggleBlockEnabled([blockId])
                }
              }}
              className={ACTION_BUTTON_STYLES}
              disabled={disabled || isExecuting}
              disabled={disabled}
            >
              <Play className={ICON_SIZE} />
              {isEnabled ? <Circle className={ICON_SIZE} /> : <CircleOff className={ICON_SIZE} />}
            </Button>
          </Tooltip.Trigger>
          <Tooltip.Content side='top'>
            {isExecuting ? 'Execution in progress' : getTooltipMessage('Run from this block')}
            {getTooltipMessage(isEnabled ? 'Disable Block' : 'Enable Block')}
          </Tooltip.Content>
        </Tooltip.Root>
      )}

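The removed run-from-block button talked to the execution hook through a window-level CustomEvent rather than shared store state. A minimal sketch of that pattern with illustrative IDs; the cast on the listener side is needed because the DOM event map does not know custom event names.

// Dispatcher side (e.g. a toolbar button) - the detail payload is typed ad hoc.
window.dispatchEvent(
  new CustomEvent('run-from-block', {
    detail: { blockId: 'block-123', workflowId: 'wf-456' }, // illustrative IDs
  })
)

// Listener side (e.g. inside a React effect) - cast required because
// 'run-from-block' is not in the built-in WindowEventMap.
const handler = (event: Event) => {
  const { blockId, workflowId } = (
    event as CustomEvent<{ blockId: string; workflowId: string }>
  ).detail
  console.log('run from', blockId, 'in', workflowId)
}
window.addEventListener('run-from-block', handler)
// ...and on cleanup:
window.removeEventListener('run-from-block', handler)
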
@@ -246,7 +246,6 @@ export function getCommandDisplayLabel(commandId: string): string {
 * Model configuration options
 */
export const MODEL_OPTIONS = [
  { value: 'auto', label: 'Auto' },
  { value: 'claude-4.5-opus', label: 'Claude 4.5 Opus' },
  { value: 'claude-4.5-sonnet', label: 'Claude 4.5 Sonnet' },
  { value: 'claude-4.5-haiku', label: 'Claude 4.5 Haiku' },

@@ -284,37 +284,22 @@ const renderLabel = (
        </>
      )}
      {showCanonicalToggle && (
        <Tooltip.Root>
          <Tooltip.Trigger asChild>
            <button
              type='button'
              className='flex h-[12px] w-[12px] flex-shrink-0 items-center justify-center bg-transparent p-0 disabled:cursor-not-allowed disabled:opacity-50'
              onClick={canonicalToggle?.onToggle}
              disabled={canonicalToggleDisabledResolved}
              aria-label={
                canonicalToggle?.mode === 'advanced'
                  ? 'Switch to selector'
                  : 'Switch to manual ID'
              }
            >
              <ArrowLeftRight
                className={cn(
                  '!h-[12px] !w-[12px]',
                  canonicalToggle?.mode === 'advanced'
                    ? 'text-[var(--text-primary)]'
                    : 'text-[var(--text-secondary)]'
                )}
              />
            </button>
          </Tooltip.Trigger>
          <Tooltip.Content side='top'>
            <p>
              {canonicalToggle?.mode === 'advanced'
                ? 'Switch to selector'
                : 'Switch to manual ID'}
            </p>
          </Tooltip.Content>
        </Tooltip.Root>
        <button
          type='button'
          className='flex h-[12px] w-[12px] flex-shrink-0 items-center justify-center bg-transparent p-0 disabled:cursor-not-allowed disabled:opacity-50'
          onClick={canonicalToggle?.onToggle}
          disabled={canonicalToggleDisabledResolved}
          aria-label={canonicalToggle?.mode === 'advanced' ? 'Use selector' : 'Enter manual ID'}
        >
          <ArrowLeftRight
            className={cn(
              '!h-[12px] !w-[12px]',
              canonicalToggle?.mode === 'advanced'
                ? 'text-[var(--text-primary)]'
                : 'text-[var(--text-secondary)]'
            )}
          />
        </button>
      )}
    </div>
  </div>
@@ -338,11 +323,6 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
  const configEqual =
    prevProps.config.id === nextProps.config.id && prevProps.config.type === nextProps.config.type

  const canonicalToggleEqual =
    !!prevProps.canonicalToggle === !!nextProps.canonicalToggle &&
    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled

  return (
    prevProps.blockId === nextProps.blockId &&
    configEqual &&
@@ -351,7 +331,8 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
    prevProps.disabled === nextProps.disabled &&
    prevProps.fieldDiffStatus === nextProps.fieldDiffStatus &&
    prevProps.allowExpandInPreview === nextProps.allowExpandInPreview &&
    canonicalToggleEqual
    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
  )
}

@@ -13,11 +13,7 @@ interface UseCanvasContextMenuProps {

/**
 * Hook for managing workflow canvas context menus.
 *
 * Handles right-click events on nodes, pane, and selections with proper multi-select behavior.
 *
 * @param props - Hook configuration
 * @returns Context menu state and handlers
 * Handles right-click events, menu state, click-outside detection, and block info extraction.
 */
export function useCanvasContextMenu({ blocks, getNodes, setNodes }: UseCanvasContextMenuProps) {
  const [activeMenu, setActiveMenu] = useState<MenuType>(null)
@@ -50,29 +46,19 @@ export function useCanvasContextMenu({ blocks, getNodes, setNodes }: UseCanvasCo
    event.stopPropagation()

    const isMultiSelect = event.shiftKey || event.metaKey || event.ctrlKey
    const currentSelectedNodes = getNodes().filter((n) => n.selected)
    const isClickedNodeSelected = currentSelectedNodes.some((n) => n.id === node.id)
    setNodes((nodes) =>
      nodes.map((n) => ({
        ...n,
        selected: isMultiSelect ? (n.id === node.id ? true : n.selected) : n.id === node.id,
      }))
    )

    let nodesToUse: Node[]
    if (isClickedNodeSelected) {
      nodesToUse = currentSelectedNodes
    } else if (isMultiSelect) {
      nodesToUse = [...currentSelectedNodes, node]
      setNodes((nodes) =>
        nodes.map((n) => ({
          ...n,
          selected: n.id === node.id ? true : n.selected,
        }))
      )
    } else {
      nodesToUse = [node]
      setNodes((nodes) =>
        nodes.map((n) => ({
          ...n,
          selected: n.id === node.id,
        }))
      )
    }
    const selectedNodes = getNodes().filter((n) => n.selected)
    const nodesToUse = isMultiSelect
      ? selectedNodes.some((n) => n.id === node.id)
        ? selectedNodes
        : [...selectedNodes, node]
      : [node]

    setPosition({ x: event.clientX, y: event.clientY })
    setSelectedBlocks(nodesToBlockInfos(nodesToUse))

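The refactored handler above collapses the selection cases into a single expression. The same rule as a standalone function, assuming reactflow-style Node objects (a sketch, not repository code): a plain right-click targets only the clicked node, while a modifier-click keeps the current selection and adds the clicked node if it is missing.

import type { Node } from 'reactflow'

// Sketch of the context-menu selection rule used in the hook above.
function resolveContextMenuNodes(node: Node, selected: Node[], isMultiSelect: boolean): Node[] {
  if (!isMultiSelect) return [node] // plain right-click targets just the clicked node
  // Modifier held: keep the existing selection, adding the clicked node if missing.
  return selected.some((n) => n.id === node.id) ? selected : [...selected, node]
}
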
@@ -1,4 +1,4 @@
import { useCallback, useEffect, useRef, useState } from 'react'
import { useCallback, useRef, useState } from 'react'
import { createLogger } from '@sim/logger'
import { useQueryClient } from '@tanstack/react-query'
import { v4 as uuidv4 } from 'uuid'
@@ -15,8 +15,7 @@ import {
  TriggerUtils,
} from '@/lib/workflows/triggers/triggers'
import { useCurrentWorkflow } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-current-workflow'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { BlockLog, BlockState, ExecutionResult, StreamingExecution } from '@/executor/types'
import type { BlockLog, ExecutionResult, StreamingExecution } from '@/executor/types'
import { hasExecutionResult } from '@/executor/utils/errors'
import { coerceValue } from '@/executor/utils/start-block'
import { subscriptionKeys } from '@/hooks/queries/subscription'
@@ -33,9 +32,6 @@ import { useWorkflowStore } from '@/stores/workflows/workflow/store'

const logger = createLogger('useWorkflowExecution')

// Module-level guard to prevent duplicate run-from-block executions across hook instances
let runFromBlockGlobalLock = false

// Debug state validation result
interface DebugValidationResult {
  isValid: boolean
@@ -102,8 +98,6 @@ export function useWorkflowExecution() {
    setActiveBlocks,
    setBlockRunStatus,
    setEdgeRunStatus,
    setLastExecutionSnapshot,
    getLastExecutionSnapshot,
  } = useExecutionStore()
  const [executionResult, setExecutionResult] = useState<ExecutionResult | null>(null)
  const executionStream = useExecutionStream()
@@ -882,8 +876,6 @@ export function useWorkflowExecution() {
      const activeBlocksSet = new Set<string>()
      const streamedContent = new Map<string, string>()
      const accumulatedBlockLogs: BlockLog[] = []
      const accumulatedBlockStates = new Map<string, BlockState>()
      const executedBlockIds = new Set<string>()

      // Execute the workflow
      try {
@@ -930,14 +922,6 @@ export function useWorkflowExecution() {
            // Track successful block execution in run path
            setBlockRunStatus(data.blockId, 'success')

            // Track block state for run-from-block snapshot
            executedBlockIds.add(data.blockId)
            accumulatedBlockStates.set(data.blockId, {
              output: data.output,
              executed: true,
              executionTime: data.durationMs,
            })

            // Edges already tracked in onBlockStarted, no need to track again

            const startedAt = new Date(Date.now() - data.durationMs).toISOString()
@@ -1072,23 +1056,6 @@ export function useWorkflowExecution() {
              },
              logs: accumulatedBlockLogs,
            }

            // Store execution snapshot for run-from-block
            if (data.success && activeWorkflowId) {
              const snapshot: SerializableExecutionState = {
                blockStates: Object.fromEntries(accumulatedBlockStates),
                executedBlocks: Array.from(executedBlockIds),
                blockLogs: accumulatedBlockLogs,
                decisions: { router: {}, condition: {} },
                completedLoops: [],
                activeExecutionPath: Array.from(executedBlockIds),
              }
              setLastExecutionSnapshot(activeWorkflowId, snapshot)
              logger.info('Stored execution snapshot for run-from-block', {
                workflowId: activeWorkflowId,
                executedBlocksCount: executedBlockIds.size,
              })
            }
          },

          onExecutionError: (data) => {
@@ -1409,228 +1376,6 @@ export function useWorkflowExecution() {
    setActiveBlocks,
  ])

  /**
   * Handles running workflow from a specific block using cached outputs
   */
  const handleRunFromBlock = useCallback(
    async (blockId: string, workflowId: string) => {
      // Prevent duplicate executions across multiple hook instances (panel.tsx and chat.tsx)
      if (runFromBlockGlobalLock) {
        logger.debug('Run-from-block already in progress (global lock), ignoring duplicate request', {
          workflowId,
          blockId,
        })
        return
      }
      runFromBlockGlobalLock = true

      const snapshot = getLastExecutionSnapshot(workflowId)
      if (!snapshot) {
        logger.error('No execution snapshot available for run-from-block', { workflowId, blockId })
        runFromBlockGlobalLock = false
        return
      }

      if (!snapshot.executedBlocks.includes(blockId)) {
        logger.error('Block was not executed in the source run', { workflowId, blockId })
        runFromBlockGlobalLock = false
        return
      }

      logger.info('Starting run-from-block execution', {
        workflowId,
        startBlockId: blockId,
        snapshotExecutedBlocks: snapshot.executedBlocks.length,
      })

      setIsExecuting(true)

      const workflowEdges = useWorkflowStore.getState().edges
      const executionId = uuidv4()
      const accumulatedBlockLogs: BlockLog[] = []
      const accumulatedBlockStates = new Map<string, BlockState>()
      const executedBlockIds = new Set<string>()
      const activeBlocksSet = new Set<string>()

      try {
        await executionStream.executeFromBlock({
          workflowId,
          startBlockId: blockId,
          sourceSnapshot: snapshot,
          callbacks: {
            onExecutionStarted: (data) => {
              logger.info('Run-from-block execution started:', data)
            },

            onBlockStarted: (data) => {
              activeBlocksSet.add(data.blockId)
              setActiveBlocks(new Set(activeBlocksSet))

              const incomingEdges = workflowEdges.filter((edge) => edge.target === data.blockId)
              incomingEdges.forEach((edge) => {
                setEdgeRunStatus(edge.id, 'success')
              })
            },

            onBlockCompleted: (data) => {
              activeBlocksSet.delete(data.blockId)
              setActiveBlocks(new Set(activeBlocksSet))

              setBlockRunStatus(data.blockId, 'success')

              executedBlockIds.add(data.blockId)
              accumulatedBlockStates.set(data.blockId, {
                output: data.output,
                executed: true,
                executionTime: data.durationMs,
              })

              const startedAt = new Date(Date.now() - data.durationMs).toISOString()
              const endedAt = new Date().toISOString()

              accumulatedBlockLogs.push({
                blockId: data.blockId,
                blockName: data.blockName || 'Unknown Block',
                blockType: data.blockType || 'unknown',
                input: data.input || {},
                output: data.output,
                success: true,
                durationMs: data.durationMs,
                startedAt,
                endedAt,
              })

              addConsole({
                input: data.input || {},
                output: data.output,
                success: true,
                durationMs: data.durationMs,
                startedAt,
                endedAt,
                workflowId,
                blockId: data.blockId,
                executionId,
                blockName: data.blockName || 'Unknown Block',
                blockType: data.blockType || 'unknown',
                iterationCurrent: data.iterationCurrent,
                iterationTotal: data.iterationTotal,
                iterationType: data.iterationType,
              })
            },

            onBlockError: (data) => {
              activeBlocksSet.delete(data.blockId)
              setActiveBlocks(new Set(activeBlocksSet))

              setBlockRunStatus(data.blockId, 'error')

              const startedAt = new Date(Date.now() - data.durationMs).toISOString()
              const endedAt = new Date().toISOString()

              accumulatedBlockLogs.push({
                blockId: data.blockId,
                blockName: data.blockName || 'Unknown Block',
                blockType: data.blockType || 'unknown',
                input: data.input || {},
                output: {},
                success: false,
                error: data.error,
                durationMs: data.durationMs,
                startedAt,
                endedAt,
              })

              addConsole({
                input: data.input || {},
                output: {},
                success: false,
                error: data.error,
                durationMs: data.durationMs,
                startedAt,
                endedAt,
                workflowId,
                blockId: data.blockId,
                executionId,
                blockName: data.blockName,
                blockType: data.blockType,
                iterationCurrent: data.iterationCurrent,
                iterationTotal: data.iterationTotal,
                iterationType: data.iterationType,
              })
            },

            onExecutionCompleted: (data) => {
              if (data.success) {
                // Merge new states with snapshot states for updated snapshot
                const mergedBlockStates: Record<string, BlockState> = { ...snapshot.blockStates }
                for (const [bId, state] of accumulatedBlockStates) {
                  mergedBlockStates[bId] = state
                }

                const mergedExecutedBlocks = new Set([
                  ...snapshot.executedBlocks,
                  ...executedBlockIds,
                ])

                const updatedSnapshot: SerializableExecutionState = {
                  ...snapshot,
                  blockStates: mergedBlockStates,
                  executedBlocks: Array.from(mergedExecutedBlocks),
                  blockLogs: [...snapshot.blockLogs, ...accumulatedBlockLogs],
                  activeExecutionPath: Array.from(mergedExecutedBlocks),
                }
                setLastExecutionSnapshot(workflowId, updatedSnapshot)
                logger.info('Updated execution snapshot after run-from-block', {
                  workflowId,
                  newBlocksExecuted: executedBlockIds.size,
                })
              }
            },

            onExecutionError: (data) => {
              logger.error('Run-from-block execution error:', data.error)
            },

            onExecutionCancelled: () => {
              logger.info('Run-from-block execution cancelled')
            },
          },
        })
      } catch (error) {
        if ((error as Error).name !== 'AbortError') {
          logger.error('Run-from-block execution failed:', error)
        }
      } finally {
        setIsExecuting(false)
        setActiveBlocks(new Set())
        runFromBlockGlobalLock = false
      }
    },
    [
      getLastExecutionSnapshot,
      setLastExecutionSnapshot,
      setIsExecuting,
      setActiveBlocks,
      setBlockRunStatus,
      setEdgeRunStatus,
      addConsole,
      executionStream,
    ]
  )

  // Listen for run-from-block events from the action bar
  useEffect(() => {
    const handleRunFromBlockEvent = (event: CustomEvent<{ blockId: string; workflowId: string }>) => {
      const { blockId, workflowId } = event.detail
      handleRunFromBlock(blockId, workflowId)
    }

    window.addEventListener('run-from-block', handleRunFromBlockEvent as EventListener)
    return () => {
      window.removeEventListener('run-from-block', handleRunFromBlockEvent as EventListener)
    }
  }, [handleRunFromBlock])

  return {
    isExecuting,
    isDebugging,
@@ -1641,6 +1386,5 @@ export function useWorkflowExecution() {
    handleResumeDebug,
    handleCancelDebug,
    handleCancelExecution,
    handleRunFromBlock,
  }
}

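The removed onExecutionCompleted callback above folded a partial run back into the previous snapshot so that a later run-from-block would see fresh outputs. A sketch of that merge in isolation; the SnapshotLike shape mirrors SerializableExecutionState as used above, and the helper name is illustrative.

// Illustrative merge of a run-from-block result into the prior snapshot.
// Shapes follow SerializableExecutionState as used above; the function itself
// is a sketch, not part of the codebase.
interface SnapshotLike {
  blockStates: Record<string, unknown>
  executedBlocks: string[]
  blockLogs: unknown[]
  activeExecutionPath: string[]
}

function mergeSnapshots(
  prev: SnapshotLike,
  newStates: Map<string, unknown>,
  newLogs: unknown[]
): SnapshotLike {
  // Fresh outputs win over cached ones for re-executed blocks.
  const blockStates = { ...prev.blockStates, ...Object.fromEntries(newStates) }
  const executedBlocks = Array.from(new Set([...prev.executedBlocks, ...newStates.keys()]))
  return {
    ...prev,
    blockStates,
    executedBlocks,
    blockLogs: [...prev.blockLogs, ...newLogs],
    activeExecutionPath: executedBlocks,
  }
}
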
@@ -1641,36 +1641,51 @@ const WorkflowContent = React.memo(() => {
  }, [screenToFlowPosition, handleToolbarDrop])

  /**
   * Focus canvas on changed blocks when diff appears.
   * Focus canvas on changed blocks when diff appears
   * Focuses on new/edited blocks rather than fitting the entire workflow
   */
  const pendingZoomBlockIdsRef = useRef<Set<string> | null>(null)
  const prevDiffReadyRef = useRef(false)

  // Phase 1: When diff becomes ready, record which blocks we want to zoom to
  // Phase 2 effect is located after displayNodes is defined (search for "Phase 2")
  useEffect(() => {
    // Only focus when diff transitions from not ready to ready
    if (isDiffReady && !prevDiffReadyRef.current && diffAnalysis) {
      // Diff just became ready - record blocks to zoom to
      const changedBlockIds = [
        ...(diffAnalysis.new_blocks || []),
        ...(diffAnalysis.edited_blocks || []),
      ]

      if (changedBlockIds.length > 0) {
        pendingZoomBlockIdsRef.current = new Set(changedBlockIds)
        const allNodes = getNodes()
        const changedNodes = allNodes.filter((node) => changedBlockIds.includes(node.id))

        if (changedNodes.length > 0) {
          logger.info('Diff ready - focusing on changed blocks', {
            changedBlockIds,
            foundNodes: changedNodes.length,
          })
          requestAnimationFrame(() => {
            fitViewToBounds({
              nodes: changedNodes,
              duration: 600,
              padding: 0.1,
              minZoom: 0.5,
              maxZoom: 1.0,
            })
          })
        } else {
          logger.info('Diff ready - no changed nodes found, fitting all')
          requestAnimationFrame(() => {
            fitViewToBounds({ padding: 0.1, duration: 600 })
          })
        }
      } else {
        // No specific blocks to focus on, fit all after a frame
        pendingZoomBlockIdsRef.current = null
        logger.info('Diff ready - no changed blocks, fitting all')
        requestAnimationFrame(() => {
          fitViewToBounds({ padding: 0.1, duration: 600 })
        })
      }
    } else if (!isDiffReady && prevDiffReadyRef.current) {
      // Diff was cleared (accepted/rejected) - cancel any pending zoom
      pendingZoomBlockIdsRef.current = null
    }
    prevDiffReadyRef.current = isDiffReady
  }, [isDiffReady, diffAnalysis, fitViewToBounds])
  }, [isDiffReady, diffAnalysis, fitViewToBounds, getNodes])

  /** Displays trigger warning notifications. */
  useEffect(() => {
@@ -2078,48 +2093,6 @@ const WorkflowContent = React.memo(() => {
    })
  }, [derivedNodes, blocks, pendingSelection, clearPendingSelection])

  // Phase 2: When displayNodes updates, check if pending zoom blocks are ready
  // (Phase 1 is located earlier in the file where pendingZoomBlockIdsRef is defined)
  useEffect(() => {
    const pendingBlockIds = pendingZoomBlockIdsRef.current
    if (!pendingBlockIds || pendingBlockIds.size === 0) {
      return
    }

    // Find the nodes we're waiting for
    const pendingNodes = displayNodes.filter((node) => pendingBlockIds.has(node.id))

    // Check if all expected nodes are present with valid dimensions
    const allNodesReady =
      pendingNodes.length === pendingBlockIds.size &&
      pendingNodes.every(
        (node) =>
          typeof node.width === 'number' &&
          typeof node.height === 'number' &&
          node.width > 0 &&
          node.height > 0
      )

    if (allNodesReady) {
      logger.info('Diff ready - focusing on changed blocks', {
        changedBlockIds: Array.from(pendingBlockIds),
        foundNodes: pendingNodes.length,
      })
      // Clear pending state before zooming to prevent re-triggers
      pendingZoomBlockIdsRef.current = null
      // Use requestAnimationFrame to ensure React has finished rendering
      requestAnimationFrame(() => {
        fitViewToBounds({
          nodes: pendingNodes,
          duration: 600,
          padding: 0.1,
          minZoom: 0.5,
          maxZoom: 1.0,
        })
      })
    }
  }, [displayNodes, fitViewToBounds])

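The two effects above split the zoom into a record phase and an act phase because node dimensions are not measured yet on the render where the diff becomes ready. The pattern condensed outside React, with illustrative types; only the record-then-act structure is taken from the code above.

// Phase 1 remembers what to zoom to; Phase 2 acts once measurements exist.
// NodeLike and the ref object are illustrative; the real code uses reactflow nodes.
interface NodeLike { id: string; width?: number; height?: number }

const pending = { current: null as Set<string> | null }

function recordPendingZoom(changedIds: string[]): void {
  pending.current = changedIds.length > 0 ? new Set(changedIds) : null
}

function tryZoom(nodes: NodeLike[], zoom: (targets: NodeLike[]) => void): void {
  const ids = pending.current
  if (!ids || ids.size === 0) return
  const targets = nodes.filter((n) => ids.has(n.id))
  const ready =
    targets.length === ids.size &&
    targets.every((n) => (n.width ?? 0) > 0 && (n.height ?? 0) > 0)
  if (ready) {
    pending.current = null // clear before zooming to avoid re-triggers
    zoom(targets)
  }
}
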
  /** Handles ActionBar remove-from-subflow events. */
  useEffect(() => {
    const handleRemoveFromSubflow = (event: Event) => {

@@ -27,13 +27,18 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
  const [isOpen, setIsOpen] = useState(false)
  const [position, setPosition] = useState<ContextMenuPosition>({ x: 0, y: 0 })
  const menuRef = useRef<HTMLDivElement>(null)
  // Used to prevent click-outside dismissal when trigger is clicked
  const dismissPreventedRef = useRef(false)

  /**
   * Handle right-click event
   */
  const handleContextMenu = useCallback(
    (e: React.MouseEvent) => {
      e.preventDefault()
      e.stopPropagation()

      // Calculate position relative to viewport
      const x = e.clientX
      const y = e.clientY

@@ -45,10 +50,17 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
    [onContextMenu]
  )

  /**
   * Close the context menu
   */
  const closeMenu = useCallback(() => {
    setIsOpen(false)
  }, [])

  /**
   * Prevent the next click-outside from dismissing the menu.
   * Call this on pointerdown of a toggle trigger to allow proper toggle behavior.
   */
  const preventDismiss = useCallback(() => {
    dismissPreventedRef.current = true
  }, [])
@@ -60,6 +72,7 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
    if (!isOpen) return

    const handleClickOutside = (e: MouseEvent) => {
      // Check if dismissal was prevented (e.g., by toggle trigger's pointerdown)
      if (dismissPreventedRef.current) {
        dismissPreventedRef.current = false
        return
@@ -69,6 +82,7 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
      }
    }

    // Small delay to prevent immediate close from the same click that opened the menu
    const timeoutId = setTimeout(() => {
      document.addEventListener('click', handleClickOutside)
    }, 0)

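The dismissPreventedRef handshake exists because a toggle trigger's click would otherwise reach the document-level click-outside listener and immediately close the menu it just opened. The same pattern stripped of React, with illustrative element handling; pointerdown fires before the subsequent click, which is why the flag is set in time.

// Sketch of the prevent-next-dismiss pattern used by useContextMenu above.
// menuEl is an illustrative placeholder for the menu's root element.
let dismissPrevented = false
let open = false

function onTriggerPointerDown(): void {
  // pointerdown runs before the click that follows it, so the flag is ready.
  dismissPrevented = true
}

function onDocumentClick(e: MouseEvent, menuEl: HTMLElement): void {
  if (dismissPrevented) {
    dismissPrevented = false // consume the flag; this click does not dismiss
    return
  }
  if (open && !menuEl.contains(e.target as Node)) {
    open = false
  }
}
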
@@ -214,6 +214,15 @@ export const A2ABlock: BlockConfig<A2AResponse> = {
    ],
    config: {
      tool: (params) => params.operation as string,
      params: (params) => {
        const { fileUpload, fileReference, ...rest } = params
        const hasFileUpload = Array.isArray(fileUpload) ? fileUpload.length > 0 : !!fileUpload
        const files = hasFileUpload ? fileUpload : fileReference
        return {
          ...rest,
          ...(files ? { files } : {}),
        }
      },
    },
  },
  inputs: {

@@ -581,18 +581,6 @@ export const GmailV2Block: BlockConfig<GmailToolResponse> = {
    results: { type: 'json', description: 'Search/read summary results' },
    attachments: { type: 'json', description: 'Downloaded attachments (if enabled)' },

    // Draft-specific outputs
    draftId: {
      type: 'string',
      description: 'Draft ID',
      condition: { field: 'operation', value: 'draft_gmail' },
    },
    messageId: {
      type: 'string',
      description: 'Gmail message ID for the draft',
      condition: { field: 'operation', value: 'draft_gmail' },
    },

    // Trigger outputs (unchanged)
    email_id: { type: 'string', description: 'Gmail message ID' },
    thread_id: { type: 'string', description: 'Gmail thread ID' },

@@ -661,25 +661,12 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e
      placeholder: 'folder/subfolder/',
      condition: { field: 'operation', value: 'storage_upload' },
    },
    {
      id: 'file',
      title: 'File',
      type: 'file-upload',
      canonicalParamId: 'fileData',
      placeholder: 'Upload file to storage',
      condition: { field: 'operation', value: 'storage_upload' },
      mode: 'basic',
      multiple: false,
      required: true,
    },
    {
      id: 'fileContent',
      title: 'File Content',
      type: 'code',
      canonicalParamId: 'fileData',
      placeholder: 'Base64 encoded for binary files, or plain text',
      condition: { field: 'operation', value: 'storage_upload' },
      mode: 'advanced',
      required: true,
    },
    {

@@ -259,17 +259,6 @@ export class ExecutionEngine {
  }

  private initializeQueue(triggerBlockId?: string): void {
    // Run-from-block mode: start directly from specified block
    if (this.context.runFromBlockContext) {
      const { startBlockId } = this.context.runFromBlockContext
      logger.info('Initializing queue for run-from-block mode', {
        startBlockId,
        dirtySetSize: this.context.runFromBlockContext.dirtySet.size,
      })
      this.addToQueue(startBlockId)
      return
    }

    const pendingBlocks = this.context.metadata.pendingBlocks
    const remainingEdges = (this.context.metadata as any).remainingEdges

@@ -5,21 +5,12 @@ import { BlockExecutor } from '@/executor/execution/block-executor'
import { EdgeManager } from '@/executor/execution/edge-manager'
import { ExecutionEngine } from '@/executor/execution/engine'
import { ExecutionState } from '@/executor/execution/state'
import type {
  ContextExtensions,
  SerializableExecutionState,
  WorkflowInput,
} from '@/executor/execution/types'
import type { ContextExtensions, WorkflowInput } from '@/executor/execution/types'
import { createBlockHandlers } from '@/executor/handlers/registry'
import { LoopOrchestrator } from '@/executor/orchestrators/loop'
import { NodeExecutionOrchestrator } from '@/executor/orchestrators/node'
import { ParallelOrchestrator } from '@/executor/orchestrators/parallel'
import type { BlockState, ExecutionContext, ExecutionResult } from '@/executor/types'
import {
  computeDirtySet,
  type RunFromBlockContext,
  validateRunFromBlock,
} from '@/executor/utils/run-from-block'
import {
  buildResolutionFromBlock,
  buildStartBlockOutput,
@@ -98,126 +89,17 @@ export class DAGExecutor {
    }
  }

  /**
   * Execute workflow starting from a specific block, using cached outputs
   * for all upstream/unaffected blocks from the source snapshot.
   *
   * This implements Jupyter notebook-style execution where:
   * - The start block and all downstream blocks are re-executed
   * - Upstream blocks retain their cached outputs from the source snapshot
   * - The result is a merged execution state
   *
   * @param workflowId - The workflow ID
   * @param startBlockId - The block to start execution from
   * @param sourceSnapshot - The execution state from a previous run
   * @returns Merged execution result with cached + fresh outputs
   */
  async executeFromBlock(
    workflowId: string,
    startBlockId: string,
    sourceSnapshot: SerializableExecutionState
  ): Promise<ExecutionResult> {
    // Build full DAG (no trigger constraint - we need all blocks for validation)
    const dag = this.dagBuilder.build(this.workflow)

    // Validate the start block
    const executedBlocks = new Set(sourceSnapshot.executedBlocks)
    const validation = validateRunFromBlock(startBlockId, dag, executedBlocks)
    if (!validation.valid) {
      throw new Error(validation.error)
    }

    // Compute dirty set (blocks that will be re-executed)
    const dirtySet = computeDirtySet(dag, startBlockId)

    logger.info('Executing from block', {
      workflowId,
      startBlockId,
      dirtySetSize: dirtySet.size,
      totalBlocks: dag.nodes.size,
      dirtyBlocks: Array.from(dirtySet),
    })

    // For convergent blocks in the dirty set, remove incoming edges from non-dirty sources.
    // This ensures that a dirty block waiting on multiple inputs doesn't wait for non-dirty
    // upstream blocks (whose outputs are already cached).
    for (const nodeId of dirtySet) {
      const node = dag.nodes.get(nodeId)
      if (!node) continue

      const nonDirtyIncoming: string[] = []
      for (const sourceId of node.incomingEdges) {
        if (!dirtySet.has(sourceId)) {
          nonDirtyIncoming.push(sourceId)
        }
      }

      for (const sourceId of nonDirtyIncoming) {
        node.incomingEdges.delete(sourceId)
        logger.debug('Removed non-dirty incoming edge for run-from-block', {
          nodeId,
          sourceId,
        })
      }
    }

    // Create context with snapshot state + runFromBlockContext
    const runFromBlockContext = { startBlockId, dirtySet }
    const { context, state } = this.createExecutionContext(workflowId, undefined, {
      snapshotState: sourceSnapshot,
      runFromBlockContext,
    })

    // Setup orchestrators and engine (same as execute())
    const resolver = new VariableResolver(this.workflow, this.workflowVariables, state)
    const loopOrchestrator = new LoopOrchestrator(dag, state, resolver)
    loopOrchestrator.setContextExtensions(this.contextExtensions)
    const parallelOrchestrator = new ParallelOrchestrator(dag, state)
    parallelOrchestrator.setResolver(resolver)
    parallelOrchestrator.setContextExtensions(this.contextExtensions)
    const allHandlers = createBlockHandlers()
    const blockExecutor = new BlockExecutor(allHandlers, resolver, this.contextExtensions, state)
    const edgeManager = new EdgeManager(dag)
    loopOrchestrator.setEdgeManager(edgeManager)
    const nodeOrchestrator = new NodeExecutionOrchestrator(
      dag,
      state,
      blockExecutor,
      loopOrchestrator,
      parallelOrchestrator
    )
    const engine = new ExecutionEngine(context, dag, edgeManager, nodeOrchestrator)

    // Run and return result
    return await engine.run()
  }

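computeDirtySet is imported above but its body is not part of this diff. One plausible implementation, inferred from how the dirty set is used (the start block plus everything reachable downstream); the real version lives in '@/executor/utils/run-from-block', and the outgoingEdges field is an assumption about the DAG node shape, since the code above only demonstrates incomingEdges.

// One plausible computeDirtySet, inferred from its usage above - the real
// implementation may differ. outgoingEdges is an assumed field on DAG nodes.
interface DagNodeLike { outgoingEdges: Set<string> }
interface DagLike { nodes: Map<string, DagNodeLike> }

function computeDirtySetSketch(dag: DagLike, startBlockId: string): Set<string> {
  const dirty = new Set<string>()
  const queue = [startBlockId]
  while (queue.length > 0) {
    const id = queue.shift()!
    if (dirty.has(id)) continue
    dirty.add(id)
    // Every block downstream of the start block must be re-executed.
    for (const next of dag.nodes.get(id)?.outgoingEdges ?? []) {
      queue.push(next)
    }
  }
  return dirty
}
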
private createExecutionContext(
|
||||
workflowId: string,
|
||||
triggerBlockId?: string,
|
||||
overrides?: {
|
||||
snapshotState?: SerializableExecutionState
|
||||
runFromBlockContext?: RunFromBlockContext
|
||||
}
|
||||
triggerBlockId?: string
|
||||
): { context: ExecutionContext; state: ExecutionState } {
|
||||
const snapshotState = overrides?.snapshotState ?? this.contextExtensions.snapshotState
|
||||
const snapshotState = this.contextExtensions.snapshotState
|
||||
const blockStates = snapshotState?.blockStates
|
||||
? new Map(Object.entries(snapshotState.blockStates))
|
||||
: new Map<string, BlockState>()
|
||||
let executedBlocks = snapshotState?.executedBlocks
|
||||
const executedBlocks = snapshotState?.executedBlocks
|
||||
? new Set(snapshotState.executedBlocks)
|
||||
: new Set<string>()
|
||||
|
||||
// In run-from-block mode, clear the executed status for dirty blocks so they can be re-executed
|
||||
if (overrides?.runFromBlockContext) {
|
||||
const { dirtySet } = overrides.runFromBlockContext
|
||||
executedBlocks = new Set([...executedBlocks].filter((id) => !dirtySet.has(id)))
|
||||
logger.info('Cleared executed status for dirty blocks', {
|
||||
dirtySetSize: dirtySet.size,
|
||||
remainingExecutedBlocks: executedBlocks.size,
|
||||
})
|
||||
}
|
||||
|
||||
const state = new ExecutionState(blockStates, executedBlocks)
|
||||
|
||||
const context: ExecutionContext = {
|
||||
@@ -287,7 +169,6 @@ export class DAGExecutor {
|
||||
abortSignal: this.contextExtensions.abortSignal,
|
||||
includeFileBase64: this.contextExtensions.includeFileBase64,
|
||||
base64MaxBytes: this.contextExtensions.base64MaxBytes,
|
||||
runFromBlockContext: overrides?.runFromBlockContext,
|
||||
}
|
||||
|
||||
if (this.contextExtensions.resumeFromSnapshot) {
|
||||
@@ -312,12 +193,6 @@ export class DAGExecutor {
|
||||
pendingBlocks: context.metadata.pendingBlocks,
|
||||
skipStarterBlockInit: true,
|
||||
})
|
||||
} else if (overrides?.runFromBlockContext) {
|
||||
// In run-from-block mode, skip starter block initialization
|
||||
// All block states come from the snapshot
|
||||
logger.info('Run-from-block mode: skipping starter block initialization', {
|
||||
startBlockId: overrides.runFromBlockContext.startBlockId,
|
||||
})
|
||||
} else {
|
||||
this.initializeStarterBlock(context, state, triggerBlockId)
|
||||
}
|
||||
|
||||
@@ -1,6 +1,5 @@
|
||||
import type { Edge } from 'reactflow'
|
||||
import type { BlockLog, BlockState, NormalizedBlockOutput } from '@/executor/types'
|
||||
import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
|
||||
import type { SubflowType } from '@/stores/workflows/workflow/types'
|
||||
|
||||
export interface ExecutionMetadata {
|
||||
@@ -106,12 +105,6 @@ export interface ContextExtensions {
|
||||
output: { input?: any; output: NormalizedBlockOutput; executionTime: number },
|
||||
iterationContext?: IterationContext
|
||||
) => Promise<void>
|
||||
|
||||
/**
|
||||
* Run-from-block configuration. When provided, executor runs in partial
|
||||
* execution mode starting from the specified block.
|
||||
*/
|
||||
runFromBlockContext?: RunFromBlockContext
|
||||
}
|
||||
|
||||
export interface WorkflowInput {
|
||||
|
||||
@@ -4,9 +4,9 @@ import { createLogger } from '@sim/logger'
|
||||
import { eq } from 'drizzle-orm'
|
||||
import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import type { BlockOutput } from '@/blocks/types'
|
||||
import { BlockType, DEFAULTS, EVALUATOR } from '@/executor/constants'
|
||||
import { BlockType, DEFAULTS, EVALUATOR, HTTP } from '@/executor/constants'
|
||||
import type { BlockHandler, ExecutionContext } from '@/executor/types'
|
||||
import { buildAPIUrl, buildAuthHeaders, extractAPIErrorMessage } from '@/executor/utils/http'
|
||||
import { buildAPIUrl, extractAPIErrorMessage } from '@/executor/utils/http'
|
||||
import { isJSONString, parseJSON, stringifyJSON } from '@/executor/utils/json'
|
||||
import { validateModelProvider } from '@/executor/utils/permission-check'
|
||||
import { calculateCost, getProviderFromModel } from '@/providers/utils'
|
||||
@@ -143,7 +143,9 @@ export class EvaluatorBlockHandler implements BlockHandler {
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: 'POST',
|
||||
headers: await buildAuthHeaders(),
|
||||
headers: {
|
||||
'Content-Type': HTTP.CONTENT_TYPE.JSON,
|
||||
},
|
||||
body: stringifyJSON(providerRequest),
|
||||
})
|
||||
|
||||
|
||||
@@ -9,12 +9,12 @@ import type { BlockOutput } from '@/blocks/types'
|
||||
import {
|
||||
BlockType,
|
||||
DEFAULTS,
|
||||
HTTP,
|
||||
isAgentBlockType,
|
||||
isRouterV2BlockType,
|
||||
ROUTER,
|
||||
} from '@/executor/constants'
|
||||
import type { BlockHandler, ExecutionContext } from '@/executor/types'
|
||||
import { buildAuthHeaders } from '@/executor/utils/http'
|
||||
import { validateModelProvider } from '@/executor/utils/permission-check'
|
||||
import { calculateCost, getProviderFromModel } from '@/providers/utils'
|
||||
import type { SerializedBlock } from '@/serializer/types'
|
||||
@@ -118,7 +118,9 @@ export class RouterBlockHandler implements BlockHandler {
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: 'POST',
|
||||
headers: await buildAuthHeaders(),
|
||||
headers: {
|
||||
'Content-Type': HTTP.CONTENT_TYPE.JSON,
|
||||
},
|
||||
body: JSON.stringify(providerRequest),
|
||||
})
|
||||
|
||||
@@ -275,7 +277,9 @@ export class RouterBlockHandler implements BlockHandler {
|
||||
|
||||
const response = await fetch(url.toString(), {
|
||||
method: 'POST',
|
||||
headers: await buildAuthHeaders(),
|
||||
headers: {
|
||||
'Content-Type': HTTP.CONTENT_TYPE.JSON,
|
||||
},
|
||||
body: JSON.stringify(providerRequest),
|
||||
})
|
||||
|
||||
|
||||
@@ -31,20 +31,7 @@ export class NodeExecutionOrchestrator {
|
||||
throw new Error(`Node not found in DAG: ${nodeId}`)
|
||||
}
|
||||
|
||||
// In run-from-block mode, skip execution for non-dirty blocks and return cached output
|
||||
if (ctx.runFromBlockContext && !ctx.runFromBlockContext.dirtySet.has(nodeId)) {
|
||||
const cachedOutput = this.state.getBlockOutput(nodeId) || {}
|
||||
logger.debug('Skipping non-dirty block in run-from-block mode', { nodeId })
|
||||
return {
|
||||
nodeId,
|
||||
output: cachedOutput,
|
||||
isFinalOutput: false,
|
||||
}
|
||||
}
|
||||
|
||||
// Skip hasExecuted check for dirty blocks in run-from-block mode - they need to be re-executed
|
||||
const isDirtyBlock = ctx.runFromBlockContext?.dirtySet.has(nodeId) ?? false
|
||||
if (!isDirtyBlock && this.state.hasExecuted(nodeId)) {
|
||||
if (this.state.hasExecuted(nodeId)) {
|
||||
const output = this.state.getBlockOutput(nodeId) || {}
|
||||
return {
|
||||
nodeId,
|
||||
|
||||
@@ -1,7 +1,6 @@
import type { TraceSpan } from '@/lib/logs/types'
import type { PermissionGroupConfig } from '@/lib/permission-groups/types'
import type { BlockOutput } from '@/blocks/types'
import type { RunFromBlockContext } from '@/executor/utils/run-from-block'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'

export interface UserFile {
@@ -251,12 +250,6 @@ export interface ExecutionContext {
* will not have their base64 content fetched.
*/
base64MaxBytes?: number

/**
* Context for "run from block" mode. When present, only blocks in dirtySet
* will be executed; others return cached outputs from the source snapshot.
*/
runFromBlockContext?: RunFromBlockContext
}

export interface ExecutionResult {

@@ -1,9 +1,7 @@
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import { normalizeName } from '@/executor/constants'
import type { ExecutionContext } from '@/executor/types'
import type { OutputSchema } from '@/executor/utils/block-reference'
import type { SerializedBlock } from '@/serializer/types'
import type { ToolConfig } from '@/tools/types'
import { getTool } from '@/tools/utils'

export interface BlockDataCollection {
blockData: Record<string, unknown>
@@ -11,32 +9,6 @@ export interface BlockDataCollection {
blockOutputSchemas: Record<string, OutputSchema>
}

export function getBlockSchema(
block: SerializedBlock,
toolConfig?: ToolConfig
): OutputSchema | undefined {
const isTrigger =
block.metadata?.category === 'triggers' ||
(block.config?.params as Record<string, unknown> | undefined)?.triggerMode === true

// Triggers use saved outputs (defines the trigger payload schema)
if (isTrigger && block.outputs && Object.keys(block.outputs).length > 0) {
return block.outputs as OutputSchema
}

// When a tool is selected, tool outputs are the source of truth
if (toolConfig?.outputs && Object.keys(toolConfig.outputs).length > 0) {
return toolConfig.outputs as OutputSchema
}

// Fallback to saved outputs for blocks without tools
if (block.outputs && Object.keys(block.outputs).length > 0) {
return block.outputs as OutputSchema
}

return undefined
}

export function collectBlockData(ctx: ExecutionContext): BlockDataCollection {
const blockData: Record<string, unknown> = {}
const blockNameMapping: Record<string, string> = {}
@@ -46,21 +18,24 @@ export function collectBlockData(ctx: ExecutionContext): BlockDataCollection {
if (state.output !== undefined) {
blockData[id] = state.output
}
}

const workflowBlocks = ctx.workflow?.blocks ?? []
for (const block of workflowBlocks) {
const id = block.id
const workflowBlock = ctx.workflow?.blocks?.find((b) => b.id === id)
if (!workflowBlock) continue

if (block.metadata?.name) {
blockNameMapping[normalizeName(block.metadata.name)] = id
if (workflowBlock.metadata?.name) {
blockNameMapping[normalizeName(workflowBlock.metadata.name)] = id
}

const toolId = block.config?.tool
const toolConfig = toolId ? getTool(toolId) : undefined
const schema = getBlockSchema(block, toolConfig)
if (schema && Object.keys(schema).length > 0) {
blockOutputSchemas[id] = schema
const blockType = workflowBlock.metadata?.id
if (blockType) {
const params = workflowBlock.config?.params as Record<string, unknown> | undefined
const subBlocks = params
? Object.fromEntries(Object.entries(params).map(([k, v]) => [k, { value: v }]))
: undefined
const schema = getBlockOutputs(blockType, subBlocks)
if (schema && Object.keys(schema).length > 0) {
blockOutputSchemas[id] = schema
}
}
}
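The params-to-subBlocks adaptation in the new collectBlockData only wraps each param value; a small sketch of the shape it produces (example values invented):

const params = { operation: 'read', path: '/tmp/file' }
const subBlocks = Object.fromEntries(
  Object.entries(params).map(([k, v]) => [k, { value: v }])
)
// subBlocks: { operation: { value: 'read' }, path: { value: '/tmp/file' } }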
@@ -1,48 +0,0 @@
/**
* Formats a JavaScript/TypeScript value as a code literal for the target language.
* Handles special cases like null, undefined, booleans, and Python-specific number representations.
*
* @param value - The value to format
* @param language - Target language ('javascript' or 'python')
* @returns A string literal representation valid in the target language
*
* @example
* formatLiteralForCode(null, 'python') // => 'None'
* formatLiteralForCode(true, 'python') // => 'True'
* formatLiteralForCode(NaN, 'python') // => "float('nan')"
* formatLiteralForCode("hello", 'javascript') // => '"hello"'
* formatLiteralForCode({a: 1}, 'python') // => "json.loads('{\"a\":1}')"
*/
export function formatLiteralForCode(value: unknown, language: 'javascript' | 'python'): string {
const isPython = language === 'python'

if (value === undefined) {
return isPython ? 'None' : 'undefined'
}
if (value === null) {
return isPython ? 'None' : 'null'
}
if (typeof value === 'boolean') {
return isPython ? (value ? 'True' : 'False') : String(value)
}
if (typeof value === 'number') {
if (Number.isNaN(value)) {
return isPython ? "float('nan')" : 'NaN'
}
if (value === Number.POSITIVE_INFINITY) {
return isPython ? "float('inf')" : 'Infinity'
}
if (value === Number.NEGATIVE_INFINITY) {
return isPython ? "float('-inf')" : '-Infinity'
}
return String(value)
}
if (typeof value === 'string') {
return JSON.stringify(value)
}
// Objects and arrays - Python needs json.loads() because JSON true/false/null aren't valid Python
if (isPython) {
return `json.loads(${JSON.stringify(JSON.stringify(value))})`
}
return JSON.stringify(value)
}
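For reference, the removed helper's behavior at a glance (the first three results come from its own @example block; the last applies the double-stringify shown above):

formatLiteralForCode(true, 'python')        // 'True'
formatLiteralForCode(NaN, 'python')         // "float('nan')"
formatLiteralForCode('hello', 'javascript') // '"hello"'
formatLiteralForCode([1, null], 'python')   // 'json.loads("[1,null]")'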
@@ -1,336 +0,0 @@
import { describe, expect, it } from 'vitest'
import type { DAG, DAGNode } from '@/executor/dag/builder'
import type { DAGEdge, NodeMetadata } from '@/executor/dag/types'
import type { SerializedLoop, SerializedParallel } from '@/serializer/types'
import { computeDirtySet, validateRunFromBlock } from '@/executor/utils/run-from-block'

/**
* Helper to create a DAG node for testing
*/
function createNode(
id: string,
outgoingEdges: Array<{ target: string; sourceHandle?: string }> = [],
metadata: Partial<NodeMetadata> = {}
): DAGNode {
const edges = new Map<string, DAGEdge>()
for (const edge of outgoingEdges) {
edges.set(edge.target, { target: edge.target, sourceHandle: edge.sourceHandle })
}

return {
id,
block: {
id,
position: { x: 0, y: 0 },
config: { tool: 'test', params: {} },
inputs: {},
outputs: {},
metadata: { id: 'test', name: `block-${id}`, category: 'tools' },
enabled: true,
},
incomingEdges: new Set<string>(),
outgoingEdges: edges,
metadata: {
isParallelBranch: false,
isLoopNode: false,
isSentinel: false,
...metadata,
},
}
}

/**
* Helper to create a DAG for testing
*/
function createDAG(nodes: DAGNode[]): DAG {
const nodeMap = new Map<string, DAGNode>()
for (const node of nodes) {
nodeMap.set(node.id, node)
}

// Set up incoming edges based on outgoing edges
for (const node of nodes) {
for (const [, edge] of node.outgoingEdges) {
const targetNode = nodeMap.get(edge.target)
if (targetNode) {
targetNode.incomingEdges.add(node.id)
}
}
}

return {
nodes: nodeMap,
loopConfigs: new Map<string, SerializedLoop>(),
parallelConfigs: new Map<string, SerializedParallel>(),
}
}

describe('computeDirtySet', () => {
it('includes start block in dirty set', () => {
const dag = createDAG([createNode('A'), createNode('B'), createNode('C')])

const dirtySet = computeDirtySet(dag, 'B')

expect(dirtySet.has('B')).toBe(true)
})

it('includes all downstream blocks in linear workflow', () => {
// A → B → C → D
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

const dirtySet = computeDirtySet(dag, 'B')

expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(3)
})

it('handles branching paths', () => {
// A → B → C
// ↓
// D → E
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }, { target: 'D' }]),
createNode('C'),
createNode('D', [{ target: 'E' }]),
createNode('E'),
])

const dirtySet = computeDirtySet(dag, 'B')

expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.has('E')).toBe(true)
expect(dirtySet.size).toBe(4)
})

it('handles convergence points', () => {
// A → C
// B → C → D
const dag = createDAG([
createNode('A', [{ target: 'C' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

// Run from A: should include A, C, D (but not B)
const dirtySet = computeDirtySet(dag, 'A')

expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(3)
})

it('handles diamond pattern', () => {
// B
// ↗ ↘
// A D
// ↘ ↗
// C
const dag = createDAG([
createNode('A', [{ target: 'B' }, { target: 'C' }]),
createNode('B', [{ target: 'D' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

const dirtySet = computeDirtySet(dag, 'A')

expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(4)
})

it('stops at graph boundaries', () => {
// A → B   C → D (disconnected)
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B'),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

const dirtySet = computeDirtySet(dag, 'A')

expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(false)
expect(dirtySet.has('D')).toBe(false)
expect(dirtySet.size).toBe(2)
})

it('handles single node workflow', () => {
const dag = createDAG([createNode('A')])

const dirtySet = computeDirtySet(dag, 'A')

expect(dirtySet.has('A')).toBe(true)
expect(dirtySet.size).toBe(1)
})

it('handles node not in DAG gracefully', () => {
const dag = createDAG([createNode('A'), createNode('B')])

const dirtySet = computeDirtySet(dag, 'nonexistent')

// Should just contain the start block ID even if not found
expect(dirtySet.has('nonexistent')).toBe(true)
expect(dirtySet.size).toBe(1)
})

it('includes convergent block when running from one branch of parallel', () => {
// Parallel branches converging:
// A → B → D
// A → C → D
// Running from B should include B and D (but not A or C)
const dag = createDAG([
createNode('A', [{ target: 'B' }, { target: 'C' }]),
createNode('B', [{ target: 'D' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

const dirtySet = computeDirtySet(dag, 'B')

expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(true)
expect(dirtySet.has('C')).toBe(false)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(2)
})

it('handles running from convergent block itself (all upstream non-dirty)', () => {
// A → C
// B → C
// Running from C should include only C and its downstream D
const dag = createDAG([
createNode('A', [{ target: 'C' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D'),
])

const dirtySet = computeDirtySet(dag, 'C')

expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.size).toBe(2)
})

it('handles deep downstream chains', () => {
// A → B → C → D → E → F
// Running from C should include C, D, E, F
const dag = createDAG([
createNode('A', [{ target: 'B' }]),
createNode('B', [{ target: 'C' }]),
createNode('C', [{ target: 'D' }]),
createNode('D', [{ target: 'E' }]),
createNode('E', [{ target: 'F' }]),
createNode('F'),
])

const dirtySet = computeDirtySet(dag, 'C')

expect(dirtySet.has('A')).toBe(false)
expect(dirtySet.has('B')).toBe(false)
expect(dirtySet.has('C')).toBe(true)
expect(dirtySet.has('D')).toBe(true)
expect(dirtySet.has('E')).toBe(true)
expect(dirtySet.has('F')).toBe(true)
expect(dirtySet.size).toBe(4)
})
})

describe('validateRunFromBlock', () => {
it('accepts valid block', () => {
const dag = createDAG([createNode('A'), createNode('B')])
const executedBlocks = new Set(['A', 'B'])

const result = validateRunFromBlock('A', dag, executedBlocks)

expect(result.valid).toBe(true)
expect(result.error).toBeUndefined()
})

it('rejects block not found in DAG', () => {
const dag = createDAG([createNode('A')])
const executedBlocks = new Set(['A', 'B'])

const result = validateRunFromBlock('B', dag, executedBlocks)

expect(result.valid).toBe(false)
expect(result.error).toContain('Block not found')
})

it('rejects blocks inside loops', () => {
const dag = createDAG([createNode('A', [], { isLoopNode: true, loopId: 'loop-1' })])
const executedBlocks = new Set(['A'])

const result = validateRunFromBlock('A', dag, executedBlocks)

expect(result.valid).toBe(false)
expect(result.error).toContain('inside loop')
expect(result.error).toContain('loop-1')
})

it('rejects blocks inside parallels', () => {
const dag = createDAG([createNode('A', [], { isParallelBranch: true, parallelId: 'parallel-1' })])
const executedBlocks = new Set(['A'])

const result = validateRunFromBlock('A', dag, executedBlocks)

expect(result.valid).toBe(false)
expect(result.error).toContain('inside parallel')
expect(result.error).toContain('parallel-1')
})

it('rejects sentinel nodes', () => {
const dag = createDAG([createNode('A', [], { isSentinel: true, sentinelType: 'start' })])
const executedBlocks = new Set(['A'])

const result = validateRunFromBlock('A', dag, executedBlocks)

expect(result.valid).toBe(false)
expect(result.error).toContain('sentinel')
})

it('rejects unexecuted blocks', () => {
const dag = createDAG([createNode('A'), createNode('B')])
const executedBlocks = new Set(['A']) // B was not executed

const result = validateRunFromBlock('B', dag, executedBlocks)

expect(result.valid).toBe(false)
expect(result.error).toContain('was not executed')
})

it('accepts regular executed block', () => {
const dag = createDAG([
createNode('trigger', [{ target: 'A' }]),
createNode('A', [{ target: 'B' }]),
createNode('B'),
])
const executedBlocks = new Set(['trigger', 'A', 'B'])

const result = validateRunFromBlock('A', dag, executedBlocks)

expect(result.valid).toBe(true)
})
})

@@ -1,110 +0,0 @@
import { createLogger } from '@sim/logger'
import type { DAG } from '@/executor/dag/builder'

const logger = createLogger('run-from-block')

/**
* Result of validating a block for run-from-block execution.
*/
export interface RunFromBlockValidation {
valid: boolean
error?: string
}

/**
* Context for run-from-block execution mode.
*/
export interface RunFromBlockContext {
/** The block ID to start execution from */
startBlockId: string
/** Set of block IDs that need re-execution (start block + all downstream) */
dirtySet: Set<string>
}

/**
* Computes all blocks that need re-execution when running from a specific block.
* Uses BFS to find all downstream blocks reachable via outgoing edges.
*
* @param dag - The workflow DAG
* @param startBlockId - The block to start execution from
* @returns Set of block IDs that are "dirty" and need re-execution
*/
export function computeDirtySet(dag: DAG, startBlockId: string): Set<string> {
const dirty = new Set<string>([startBlockId])
const queue = [startBlockId]

while (queue.length > 0) {
const nodeId = queue.shift()!
const node = dag.nodes.get(nodeId)
if (!node) continue

for (const [, edge] of node.outgoingEdges) {
if (!dirty.has(edge.target)) {
dirty.add(edge.target)
queue.push(edge.target)
}
}
}

logger.debug('Computed dirty set', {
startBlockId,
dirtySetSize: dirty.size,
dirtyBlocks: Array.from(dirty),
})

return dirty
}

/**
* Validates that a block can be used as a run-from-block starting point.
*
* Validation rules:
* - Block must exist in the DAG
* - Block cannot be inside a loop
* - Block cannot be inside a parallel
* - Block cannot be a sentinel node
* - Block must have been executed in the source run
*
* @param blockId - The block ID to validate
* @param dag - The workflow DAG
* @param executedBlocks - Set of blocks that were executed in the source run
* @returns Validation result with error message if invalid
*/
export function validateRunFromBlock(
blockId: string,
dag: DAG,
executedBlocks: Set<string>
): RunFromBlockValidation {
const node = dag.nodes.get(blockId)

if (!node) {
return { valid: false, error: `Block not found in workflow: ${blockId}` }
}

if (node.metadata.isLoopNode) {
return {
valid: false,
error: `Cannot run from block inside loop: ${node.metadata.loopId}`,
}
}

if (node.metadata.isParallelBranch) {
return {
valid: false,
error: `Cannot run from block inside parallel: ${node.metadata.parallelId}`,
}
}

if (node.metadata.isSentinel) {
return { valid: false, error: 'Cannot run from sentinel node' }
}

if (!executedBlocks.has(blockId)) {
return {
valid: false,
error: `Block was not executed in source run: ${blockId}`,
}
}

return { valid: true }
}
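How the two removed helpers composed at a call site, as a sketch (the `dag` and `executedBlocks` values are assumed to come from the executor's DAG builder and the source run):

const validation = validateRunFromBlock(startBlockId, dag, executedBlocks)
if (!validation.valid) {
  throw new Error(validation.error)
}
const runFromBlockContext: RunFromBlockContext = {
  startBlockId,
  dirtySet: computeDirtySet(dag, startBlockId),
}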
@@ -378,30 +378,8 @@ function buildManualTriggerOutput(
return mergeFilesIntoOutput(output, workflowInput)
}

function buildIntegrationTriggerOutput(
workflowInput: unknown,
structuredInput: Record<string, unknown>,
hasStructured: boolean
): NormalizedBlockOutput {
const output: NormalizedBlockOutput = {}

if (hasStructured) {
for (const [key, value] of Object.entries(structuredInput)) {
output[key] = value
}
}

if (isPlainObject(workflowInput)) {
for (const [key, value] of Object.entries(workflowInput)) {
if (value !== undefined && value !== null) {
output[key] = value
} else if (!Object.hasOwn(output, key)) {
output[key] = value
}
}
}

return mergeFilesIntoOutput(output, workflowInput)
function buildIntegrationTriggerOutput(workflowInput: unknown): NormalizedBlockOutput {
return isPlainObject(workflowInput) ? (workflowInput as NormalizedBlockOutput) : {}
}

function extractSubBlocks(block: SerializedBlock): Record<string, unknown> | undefined {
@@ -450,7 +428,7 @@ export function buildStartBlockOutput(options: StartBlockOutputOptions): Normali
return buildManualTriggerOutput(finalInput, workflowInput)

case StartBlockPath.EXTERNAL_TRIGGER:
return buildIntegrationTriggerOutput(workflowInput, structuredInput, hasStructured)
return buildIntegrationTriggerOutput(workflowInput)

case StartBlockPath.LEGACY_STARTER:
return buildLegacyStarterOutput(

@@ -157,14 +157,7 @@ export class VariableResolver {

let replacementError: Error | null = null

const blockType = block?.metadata?.id
const language =
blockType === BlockType.FUNCTION
? ((block?.config?.params as Record<string, unknown> | undefined)?.language as
| string
| undefined)
: undefined

// Use generic utility for smart variable reference replacement
let result = replaceValidReferences(template, (match) => {
if (replacementError) return match

@@ -174,7 +167,14 @@ export class VariableResolver {
return match
}

return this.blockResolver.formatValueForBlock(resolved, blockType, language)
const blockType = block?.metadata?.id
const isInTemplateLiteral =
blockType === BlockType.FUNCTION &&
template.includes('${') &&
template.includes('}') &&
template.includes('`')

return this.blockResolver.formatValueForBlock(resolved, blockType, isInTemplateLiteral)
} catch (error) {
replacementError = error instanceof Error ? error : new Error(String(error))
return match

@@ -257,9 +257,15 @@ describe('BlockResolver', () => {
expect(result).toBe('"hello"')
})

it.concurrent('should format object for function block', () => {
it.concurrent('should format string for function block in template literal', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock({ a: 1 }, 'function')
const result = resolver.formatValueForBlock('hello', 'function', true)
expect(result).toBe('hello')
})

it.concurrent('should format object for function block in template literal', () => {
const resolver = new BlockResolver(createTestWorkflow())
const result = resolver.formatValueForBlock({ a: 1 }, 'function', true)
expect(result).toBe('{"a":1}')
})

@@ -1,16 +1,15 @@
import { getBlockOutputs } from '@/lib/workflows/blocks/block-outputs'
import {
isReference,
normalizeName,
parseReferencePath,
SPECIAL_REFERENCE_PREFIXES,
} from '@/executor/constants'
import { getBlockSchema } from '@/executor/utils/block-data'
import {
InvalidFieldError,
type OutputSchema,
resolveBlockReference,
} from '@/executor/utils/block-reference'
import { formatLiteralForCode } from '@/executor/utils/code-formatting'
import {
navigatePath,
type ResolutionContext,
@@ -68,9 +67,15 @@ export class BlockResolver implements Resolver {
blockData[blockId] = output
}

const blockType = block.metadata?.id
const params = block.config?.params as Record<string, unknown> | undefined
const subBlocks = params
? Object.fromEntries(Object.entries(params).map(([k, v]) => [k, { value: v }]))
: undefined
const toolId = block.config?.tool
const toolConfig = toolId ? getTool(toolId) : undefined
const outputSchema = getBlockSchema(block, toolConfig)
const outputSchema =
toolConfig?.outputs ?? (blockType ? getBlockOutputs(blockType, subBlocks) : block.outputs)

if (outputSchema && Object.keys(outputSchema).length > 0) {
blockOutputSchemas[blockId] = outputSchema
@@ -160,13 +165,17 @@ export class BlockResolver implements Resolver {
return this.nameToBlockId.get(normalizeName(name))
}

public formatValueForBlock(value: any, blockType: string | undefined, language?: string): string {
public formatValueForBlock(
value: any,
blockType: string | undefined,
isInTemplateLiteral = false
): string {
if (blockType === 'condition') {
return this.stringifyForCondition(value)
}

if (blockType === 'function') {
return this.formatValueForCodeContext(value, language)
return this.formatValueForCodeContext(value, isInTemplateLiteral)
}

if (blockType === 'response') {
@@ -207,7 +216,29 @@ export class BlockResolver implements Resolver {
return String(value)
}

private formatValueForCodeContext(value: any, language?: string): string {
return formatLiteralForCode(value, language === 'python' ? 'python' : 'javascript')
private formatValueForCodeContext(value: any, isInTemplateLiteral: boolean): string {
if (isInTemplateLiteral) {
if (typeof value === 'string') {
return value
}
if (typeof value === 'object' && value !== null) {
return JSON.stringify(value)
}
return String(value)
}

if (typeof value === 'string') {
return JSON.stringify(value)
}
if (typeof value === 'object' && value !== null) {
return JSON.stringify(value)
}
if (value === undefined) {
return 'undefined'
}
if (value === null) {
return 'null'
}
return String(value)
}
}
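The new isInTemplateLiteral flag decides whether a resolved value is emitted raw or as a parseable literal; a sketch built on the test fixtures above:

resolver.formatValueForBlock('hello', 'function', true)   // hello    (raw, inside a template literal)
resolver.formatValueForBlock('hello', 'function', false)  // "hello"  (quoted literal)
resolver.formatValueForBlock({ a: 1 }, 'function', false) // {"a":1}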
@@ -30,10 +30,7 @@ export function navigatePath(obj: any, path: string[]): any {
const arrayMatch = part.match(/^([^[]+)(\[.+)$/)
if (arrayMatch) {
const [, prop, bracketsPart] = arrayMatch
current =
typeof current === 'object' && current !== null
? (current as Record<string, unknown>)[prop]
: undefined
current = current[prop]
if (current === undefined || current === null) {
return undefined
}
@@ -52,10 +49,7 @@
const index = Number.parseInt(part, 10)
current = Array.isArray(current) ? current[index] : undefined
} else {
current =
typeof current === 'object' && current !== null
? (current as Record<string, unknown>)[part]
: undefined
current = current[part]
}
}
return current
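navigatePath on a small illustrative object (numeric segments index into arrays; example data invented):

navigatePath({ user: { tags: ['a', 'b'] } }, ['user', 'tags', '1']) // 'b'
navigatePath({ user: { tags: [] } }, ['user', 'tags'])              // []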
@@ -1,6 +1,5 @@
import { useCallback } from 'react'
import type { Node, ReactFlowInstance } from 'reactflow'
import { BLOCK_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'

interface VisibleBounds {
width: number
@@ -140,8 +139,8 @@ export function useCanvasViewport(reactFlowInstance: ReactFlowInstance | null) {
let maxY = Number.NEGATIVE_INFINITY

nodes.forEach((node) => {
const nodeWidth = node.width ?? BLOCK_DIMENSIONS.FIXED_WIDTH
const nodeHeight = node.height ?? BLOCK_DIMENSIONS.MIN_HEIGHT
const nodeWidth = node.width ?? 200
const nodeHeight = node.height ?? 100

minX = Math.min(minX, node.position.x)
minY = Math.min(minY, node.position.y)

@@ -680,10 +680,6 @@ export function useCollaborativeWorkflow() {
previousPositions?: Map<string, { x: number; y: number; parentId?: string }>
}
) => {
if (isBaselineDiffView) {
return
}

if (!isInActiveRoom()) {
logger.debug('Skipping batch position update - not in active workflow')
return
@@ -729,7 +725,7 @@ export function useCollaborativeWorkflow() {
}
}
},
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
[addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
)

const collaborativeUpdateBlockName = useCallback(
@@ -821,10 +817,6 @@ export function useCollaborativeWorkflow() {

const collaborativeBatchToggleBlockEnabled = useCallback(
(ids: string[]) => {
if (isBaselineDiffView) {
return
}

if (ids.length === 0) return

const previousStates: Record<string, boolean> = {}
@@ -857,7 +849,7 @@ export function useCollaborativeWorkflow() {

undoRedo.recordBatchToggleEnabled(validIds, previousStates)
},
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
)

const collaborativeBatchUpdateParent = useCallback(
@@ -869,10 +861,6 @@ export function useCollaborativeWorkflow() {
affectedEdges: Edge[]
}>
) => {
if (isBaselineDiffView) {
return
}

if (!isInActiveRoom()) {
logger.debug('Skipping batch update parent - not in active workflow')
return
@@ -943,7 +931,7 @@ export function useCollaborativeWorkflow() {

logger.debug('Batch updated parent for blocks', { updateCount: updates.length })
},
[isBaselineDiffView, isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
[isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
)

const collaborativeToggleBlockAdvancedMode = useCallback(
@@ -963,37 +951,18 @@ export function useCollaborativeWorkflow() {

const collaborativeSetBlockCanonicalMode = useCallback(
(id: string, canonicalId: string, canonicalMode: 'basic' | 'advanced') => {
if (isBaselineDiffView) {
return
}

useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)

if (!activeWorkflowId) {
return
}

const operationId = crypto.randomUUID()
addToQueue({
id: operationId,
operation: {
operation: BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
target: OPERATION_TARGETS.BLOCK,
payload: { id, canonicalId, canonicalMode },
},
workflowId: activeWorkflowId,
userId: session?.user?.id || 'unknown',
})
executeQueuedOperation(
BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
OPERATION_TARGETS.BLOCK,
{ id, canonicalId, canonicalMode },
() => useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
)
},
[isBaselineDiffView, activeWorkflowId, addToQueue, session?.user?.id]
[executeQueuedOperation]
)

const collaborativeBatchToggleBlockHandles = useCallback(
(ids: string[]) => {
if (isBaselineDiffView) {
return
}

if (ids.length === 0) return

const previousStates: Record<string, boolean> = {}
@@ -1026,15 +995,11 @@ export function useCollaborativeWorkflow() {

undoRedo.recordBatchToggleHandles(validIds, previousStates)
},
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
)

const collaborativeBatchAddEdges = useCallback(
(edges: Edge[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}

if (!isInActiveRoom()) {
logger.debug('Skipping batch add edges - not in active workflow')
return false
@@ -1070,15 +1035,11 @@ export function useCollaborativeWorkflow() {

return true
},
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
[addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
)

const collaborativeBatchRemoveEdges = useCallback(
(edgeIds: string[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}

if (!isInActiveRoom()) {
logger.debug('Skipping batch remove edges - not in active workflow')
return false
@@ -1128,7 +1089,7 @@ export function useCollaborativeWorkflow() {
logger.info('Batch removed edges', { count: validEdgeIds.length })
return true
},
[isBaselineDiffView, isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
[isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
)

const collaborativeSetSubblockValue = useCallback(
@@ -1204,10 +1165,6 @@ export function useCollaborativeWorkflow() {
(blockId: string, subblockId: string, value: any) => {
if (isApplyingRemoteChange.current) return

if (isBaselineDiffView) {
return
}

if (!isInActiveRoom()) {
logger.debug('Skipping tag selection - not in active workflow', {
currentWorkflowId,
@@ -1235,14 +1192,7 @@ export function useCollaborativeWorkflow() {
userId: session?.user?.id || 'unknown',
})
},
[
isBaselineDiffView,
addToQueue,
currentWorkflowId,
activeWorkflowId,
session?.user?.id,
isInActiveRoom,
]
[addToQueue, currentWorkflowId, activeWorkflowId, session?.user?.id, isInActiveRoom]
)

const collaborativeUpdateLoopType = useCallback(
@@ -1588,10 +1538,6 @@ export function useCollaborativeWorkflow() {

const collaborativeBatchRemoveBlocks = useCallback(
(blockIds: string[], options?: { skipUndoRedo?: boolean }) => {
if (isBaselineDiffView) {
return false
}

if (!isInActiveRoom()) {
logger.debug('Skipping batch remove blocks - not in active workflow')
return false
@@ -1673,7 +1619,6 @@ export function useCollaborativeWorkflow() {
return true
},
[
isBaselineDiffView,
addToQueue,
activeWorkflowId,
session?.user?.id,

@@ -1,85 +1,10 @@
import { useCallback, useRef } from 'react'
import { createLogger } from '@sim/logger'
import type { ExecutionEvent } from '@/lib/workflows/executor/execution-events'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { SubflowType } from '@/stores/workflows/workflow/types'

const logger = createLogger('useExecutionStream')

/**
* Processes SSE events from a response body and invokes appropriate callbacks.
*/
async function processSSEStream(
reader: ReadableStreamDefaultReader<Uint8Array>,
callbacks: ExecutionStreamCallbacks,
logPrefix: string
): Promise<void> {
const decoder = new TextDecoder()
let buffer = ''

try {
while (true) {
const { done, value } = await reader.read()

if (done) break

buffer += decoder.decode(value, { stream: true })
const lines = buffer.split('\n\n')
buffer = lines.pop() || ''

for (const line of lines) {
if (!line.trim() || !line.startsWith('data: ')) continue

const data = line.substring(6).trim()
if (data === '[DONE]') {
logger.info(`${logPrefix} stream completed`)
continue
}

try {
const event = JSON.parse(data) as ExecutionEvent

switch (event.type) {
case 'execution:started':
callbacks.onExecutionStarted?.(event.data)
break
case 'execution:completed':
callbacks.onExecutionCompleted?.(event.data)
break
case 'execution:error':
callbacks.onExecutionError?.(event.data)
break
case 'execution:cancelled':
callbacks.onExecutionCancelled?.(event.data)
break
case 'block:started':
callbacks.onBlockStarted?.(event.data)
break
case 'block:completed':
callbacks.onBlockCompleted?.(event.data)
break
case 'block:error':
callbacks.onBlockError?.(event.data)
break
case 'stream:chunk':
callbacks.onStreamChunk?.(event.data)
break
case 'stream:done':
callbacks.onStreamDone?.(event.data)
break
default:
logger.warn('Unknown event type:', (event as any).type)
}
} catch (error) {
logger.error('Failed to parse SSE event:', error, { data })
}
}
}
} finally {
reader.releaseLock()
}
}
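The loop above assumes standard SSE framing: frames separated by a blank line, the payload on a `data: ` line, and `[DONE]` as the terminator. An illustrative wire capture (payload invented):

// Two frames as they arrive on the wire; the second ends the stream.
const wire =
  'data: {"type":"block:started","executionId":"e1","data":{"blockId":"b1"}}\n\n' +
  'data: [DONE]\n\n'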
export interface ExecutionStreamCallbacks {
onExecutionStarted?: (data: { startTime: string }) => void
onExecutionCompleted?: (data: {
@@ -146,13 +71,6 @@ export interface ExecuteStreamOptions {
callbacks?: ExecutionStreamCallbacks
}

export interface ExecuteFromBlockOptions {
workflowId: string
startBlockId: string
sourceSnapshot: SerializableExecutionState
callbacks?: ExecutionStreamCallbacks
}

/**
* Hook for executing workflows via server-side SSE streaming
*/
@@ -201,7 +119,91 @@ export function useExecutionStream() {
}

const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Execution')
const decoder = new TextDecoder()
let buffer = ''

try {
while (true) {
const { done, value } = await reader.read()

if (done) {
break
}

buffer += decoder.decode(value, { stream: true })

const lines = buffer.split('\n\n')

buffer = lines.pop() || ''

for (const line of lines) {
if (!line.trim() || !line.startsWith('data: ')) {
continue
}

const data = line.substring(6).trim()

if (data === '[DONE]') {
logger.info('Stream completed')
continue
}

try {
const event = JSON.parse(data) as ExecutionEvent

logger.info('📡 SSE Event received:', {
type: event.type,
executionId: event.executionId,
data: event.data,
})

switch (event.type) {
case 'execution:started':
logger.info('🚀 Execution started')
callbacks.onExecutionStarted?.(event.data)
break
case 'execution:completed':
logger.info('✅ Execution completed')
callbacks.onExecutionCompleted?.(event.data)
break
case 'execution:error':
logger.error('❌ Execution error')
callbacks.onExecutionError?.(event.data)
break
case 'execution:cancelled':
logger.warn('🛑 Execution cancelled')
callbacks.onExecutionCancelled?.(event.data)
break
case 'block:started':
logger.info('🔷 Block started:', event.data.blockId)
callbacks.onBlockStarted?.(event.data)
break
case 'block:completed':
logger.info('✓ Block completed:', event.data.blockId)
callbacks.onBlockCompleted?.(event.data)
break
case 'block:error':
logger.error('✗ Block error:', event.data.blockId)
callbacks.onBlockError?.(event.data)
break
case 'stream:chunk':
callbacks.onStreamChunk?.(event.data)
break
case 'stream:done':
logger.info('Stream done:', event.data.blockId)
callbacks.onStreamDone?.(event.data)
break
default:
logger.warn('Unknown event type:', (event as any).type)
}
} catch (error) {
logger.error('Failed to parse SSE event:', error, { data })
}
}
}
} finally {
reader.releaseLock()
}
} catch (error: any) {
if (error.name === 'AbortError') {
logger.info('Execution stream cancelled')
@@ -220,65 +222,6 @@ export function useExecutionStream() {
}
}, [])

const executeFromBlock = useCallback(async (options: ExecuteFromBlockOptions) => {
const { workflowId, startBlockId, sourceSnapshot, callbacks = {} } = options

if (abortControllerRef.current) {
abortControllerRef.current.abort()
}

const abortController = new AbortController()
abortControllerRef.current = abortController
currentExecutionRef.current = null

try {
const response = await fetch(`/api/workflows/${workflowId}/execute-from-block`, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({ startBlockId, sourceSnapshot }),
signal: abortController.signal,
})

if (!response.ok) {
const errorResponse = await response.json()
const error = new Error(errorResponse.error || 'Failed to start execution')
if (errorResponse && typeof errorResponse === 'object') {
Object.assign(error, { executionResult: errorResponse })
}
throw error
}

if (!response.body) {
throw new Error('No response body')
}

const executionId = response.headers.get('X-Execution-Id')
if (executionId) {
currentExecutionRef.current = { workflowId, executionId }
}

const reader = response.body.getReader()
await processSSEStream(reader, callbacks, 'Run-from-block')
} catch (error: any) {
if (error.name === 'AbortError') {
logger.info('Run-from-block execution cancelled')
callbacks.onExecutionCancelled?.({ duration: 0 })
} else {
logger.error('Run-from-block execution error:', error)
callbacks.onExecutionError?.({
error: error.message || 'Unknown error',
duration: 0,
})
}
throw error
} finally {
abortControllerRef.current = null
currentExecutionRef.current = null
}
}, [])
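Calling the removed run-from-block path looked roughly like this (a sketch; the snapshot value and callback set are abbreviated):

const { executeFromBlock } = useExecutionStream()
await executeFromBlock({
  workflowId,
  startBlockId,
  sourceSnapshot, // SerializableExecutionState captured from the source run
  callbacks: {
    onBlockCompleted: (data) => console.log('block done', data.blockId),
  },
})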
const cancel = useCallback(() => {
const execution = currentExecutionRef.current
if (execution) {
@@ -296,7 +239,6 @@ export function useExecutionStream() {

return {
execute,
executeFromBlock,
cancel,
}
}

@@ -21,7 +21,6 @@ export const COPILOT_MODEL_IDS = [
'claude-4.5-opus',
'claude-4.1-opus',
'gemini-3-pro',
'auto',
] as const

export type CopilotModelId = (typeof COPILOT_MODEL_IDS)[number]

@@ -11,10 +11,9 @@ import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
import { isValidKey } from '@/lib/workflows/sanitization/key-validation'
import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility'
import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
import { getAllBlocks, getBlock } from '@/blocks/registry'
import type { BlockConfig, SubBlockConfig } from '@/blocks/types'
import type { SubBlockConfig } from '@/blocks/types'
import { EDGE, normalizeName, RESERVED_BLOCK_NAMES } from '@/executor/constants'
import { getUserPermissionConfig } from '@/executor/utils/permission-check'
import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
@@ -668,47 +667,11 @@ function createBlockFromParams(
}
}
})

if (validatedInputs) {
updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig)
}
}

return blockState
}

function updateCanonicalModesForInputs(
block: { data?: { canonicalModes?: Record<string, 'basic' | 'advanced'> } },
inputKeys: string[],
blockConfig: BlockConfig
): void {
if (!blockConfig.subBlocks?.length) return

const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks)
const canonicalModeUpdates: Record<string, 'basic' | 'advanced'> = {}

for (const inputKey of inputKeys) {
const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey]
if (!canonicalId) continue

const group = canonicalIndex.groupsById[canonicalId]
if (!group || !isCanonicalPair(group)) continue

const isAdvanced = group.advancedIds.includes(inputKey)
const existingMode = canonicalModeUpdates[canonicalId]

if (!existingMode || isAdvanced) {
canonicalModeUpdates[canonicalId] = isAdvanced ? 'advanced' : 'basic'
}
}

if (Object.keys(canonicalModeUpdates).length > 0) {
if (!block.data) block.data = {}
if (!block.data.canonicalModes) block.data.canonicalModes = {}
Object.assign(block.data.canonicalModes, canonicalModeUpdates)
}
}

/**
* Normalize tools array by adding back fields that were sanitized for training
*/
@@ -1691,15 +1654,6 @@ function applyOperationsToWorkflowState(
block.data.collection = params.inputs.collection
}
}

const editBlockConfig = getBlock(block.type)
if (editBlockConfig) {
updateCanonicalModesForInputs(
block,
Object.keys(validationResult.validInputs),
editBlockConfig
)
}
}

// Update basic properties
@@ -2302,15 +2256,6 @@ function applyOperationsToWorkflowState(
existingBlock.subBlocks[key].value = sanitizedValue
}
})

const existingBlockConfig = getBlock(existingBlock.type)
if (existingBlockConfig) {
updateCanonicalModesForInputs(
existingBlock,
Object.keys(validationResult.validInputs),
existingBlockConfig
)
}
}
} else {
// Special container types (loop, parallel) are not in the block registry but are valid

@@ -132,8 +132,6 @@ async function executeCode(request) {
for (const [key, value] of Object.entries(contextVariables)) {
if (value === undefined) {
await jail.set(key, undefined)
} else if (value === null) {
await jail.set(key, null)
} else {
await jail.set(key, new ivm.ExternalCopy(value).copyInto())
}

@@ -8,17 +8,6 @@ const logger = createLogger('EmbeddingUtils')

const MAX_TOKENS_PER_REQUEST = 8000
const MAX_CONCURRENT_BATCHES = env.KB_CONFIG_CONCURRENCY_LIMIT || 50
const EMBEDDING_DIMENSIONS = 1536

/**
* Check if the model supports custom dimensions.
* text-embedding-3-* models support the dimensions parameter.
* Checks for 'embedding-3' to handle Azure deployments with custom naming conventions.
*/
function supportsCustomDimensions(modelName: string): boolean {
const name = modelName.toLowerCase()
return name.includes('embedding-3') && !name.includes('ada')
}

export class EmbeddingAPIError extends Error {
public status: number
@@ -104,19 +93,15 @@ async function getEmbeddingConfig(
async function callEmbeddingAPI(inputs: string[], config: EmbeddingConfig): Promise<number[][]> {
return retryWithExponentialBackoff(
async () => {
const useDimensions = supportsCustomDimensions(config.modelName)

const requestBody = config.useAzure
? {
input: inputs,
encoding_format: 'float',
...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
}
: {
input: inputs,
model: config.modelName,
encoding_format: 'float',
...(useDimensions && { dimensions: EMBEDDING_DIMENSIONS }),
}

const response = await fetch(config.apiUrl, {
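supportsCustomDimensions is a plain substring check, so it also matches renamed Azure deployments; for example (model names illustrative):

supportsCustomDimensions('text-embedding-3-small')     // true
supportsCustomDimensions('my-azure-embedding-3-large') // true, via the 'embedding-3' substring
supportsCustomDimensions('text-embedding-ada-002')     // false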
@@ -18,52 +18,6 @@ const logger = createLogger('BlobClient')

let _blobServiceClient: BlobServiceClientInstance | null = null

interface ParsedCredentials {
accountName: string
accountKey: string
}

/**
* Extract account name and key from an Azure connection string.
* Connection strings have the format: DefaultEndpointsProtocol=https;AccountName=...;AccountKey=...;EndpointSuffix=...
*/
function parseConnectionString(connectionString: string): ParsedCredentials {
const accountNameMatch = connectionString.match(/AccountName=([^;]+)/)
if (!accountNameMatch) {
throw new Error('Cannot extract account name from connection string')
}

const accountKeyMatch = connectionString.match(/AccountKey=([^;]+)/)
if (!accountKeyMatch) {
throw new Error('Cannot extract account key from connection string')
}

return {
accountName: accountNameMatch[1],
accountKey: accountKeyMatch[1],
}
}

/**
* Get account credentials from BLOB_CONFIG, extracting from connection string if necessary.
*/
function getAccountCredentials(): ParsedCredentials {
if (BLOB_CONFIG.connectionString) {
return parseConnectionString(BLOB_CONFIG.connectionString)
}

if (BLOB_CONFIG.accountName && BLOB_CONFIG.accountKey) {
return {
accountName: BLOB_CONFIG.accountName,
accountKey: BLOB_CONFIG.accountKey,
}
}

throw new Error(
'Azure Blob Storage credentials are missing – set AZURE_CONNECTION_STRING or both AZURE_ACCOUNT_NAME and AZURE_ACCOUNT_KEY'
)
}

export async function getBlobServiceClient(): Promise<BlobServiceClientInstance> {
if (_blobServiceClient) return _blobServiceClient
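parseConnectionString applied to a placeholder connection string (credentials invented):

const creds = parseConnectionString(
  'DefaultEndpointsProtocol=https;AccountName=myaccount;AccountKey=abc123==;EndpointSuffix=core.windows.net'
)
// creds: { accountName: 'myaccount', accountKey: 'abc123==' }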
@@ -173,8 +127,6 @@ export async function getPresignedUrl(key: string, expiresIn = 3600) {
|
||||
const containerClient = blobServiceClient.getContainerClient(BLOB_CONFIG.containerName)
|
||||
const blockBlobClient = containerClient.getBlockBlobClient(key)
|
||||
|
||||
const { accountName, accountKey } = getAccountCredentials()
|
||||
|
||||
const sasOptions = {
|
||||
containerName: BLOB_CONFIG.containerName,
|
||||
blobName: key,
|
||||
@@ -185,7 +137,13 @@ export async function getPresignedUrl(key: string, expiresIn = 3600) {
|
||||
|
||||
const sasToken = generateBlobSASQueryParameters(
|
||||
sasOptions,
|
||||
new StorageSharedKeyCredential(accountName, accountKey)
|
||||
new StorageSharedKeyCredential(
|
||||
BLOB_CONFIG.accountName,
|
||||
BLOB_CONFIG.accountKey ??
|
||||
(() => {
|
||||
throw new Error('AZURE_ACCOUNT_KEY is required when using account name authentication')
|
||||
})()
|
||||
)
|
||||
).toString()
|
||||
|
||||
return `${blockBlobClient.url}?${sasToken}`
|
||||
@@ -210,14 +168,9 @@ export async function getPresignedUrlWithConfig(
|
||||
StorageSharedKeyCredential,
|
||||
} = await import('@azure/storage-blob')
|
||||
let tempBlobServiceClient: BlobServiceClientInstance
|
||||
let accountName: string
|
||||
let accountKey: string
|
||||
|
||||
if (customConfig.connectionString) {
|
||||
tempBlobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
|
||||
const credentials = parseConnectionString(customConfig.connectionString)
|
||||
accountName = credentials.accountName
|
||||
accountKey = credentials.accountKey
|
||||
} else if (customConfig.accountName && customConfig.accountKey) {
|
||||
const sharedKeyCredential = new StorageSharedKeyCredential(
|
||||
customConfig.accountName,
|
||||
@@ -227,8 +180,6 @@ export async function getPresignedUrlWithConfig(
|
||||
`https://${customConfig.accountName}.blob.core.windows.net`,
|
||||
sharedKeyCredential
|
||||
)
|
||||
accountName = customConfig.accountName
|
||||
accountKey = customConfig.accountKey
|
||||
} else {
|
||||
throw new Error(
|
||||
'Custom blob config must include either connectionString or accountName + accountKey'
|
||||
@@ -248,7 +199,13 @@ export async function getPresignedUrlWithConfig(
|
||||
|
||||
const sasToken = generateBlobSASQueryParameters(
|
||||
sasOptions,
|
||||
new StorageSharedKeyCredential(accountName, accountKey)
|
||||
new StorageSharedKeyCredential(
|
||||
customConfig.accountName,
|
||||
customConfig.accountKey ??
|
||||
(() => {
|
||||
throw new Error('Account key is required when using account name authentication')
|
||||
})()
|
||||
)
|
||||
).toString()
|
||||
|
||||
return `${blockBlobClient.url}?${sasToken}`
|
||||
@@ -446,9 +403,13 @@ export async function getMultipartPartUrls(
|
||||
if (customConfig) {
|
||||
if (customConfig.connectionString) {
|
||||
blobServiceClient = BlobServiceClient.fromConnectionString(customConfig.connectionString)
|
||||
const credentials = parseConnectionString(customConfig.connectionString)
|
||||
accountName = credentials.accountName
|
||||
accountKey = credentials.accountKey
|
||||
const match = customConfig.connectionString.match(/AccountName=([^;]+)/)
|
||||
if (!match) throw new Error('Cannot extract account name from connection string')
|
||||
accountName = match[1]
|
||||
|
||||
const keyMatch = customConfig.connectionString.match(/AccountKey=([^;]+)/)
|
||||
if (!keyMatch) throw new Error('Cannot extract account key from connection string')
|
||||
accountKey = keyMatch[1]
|
||||
} else if (customConfig.accountName && customConfig.accountKey) {
|
||||
const credential = new StorageSharedKeyCredential(
|
||||
customConfig.accountName,
|
||||
@@ -467,9 +428,12 @@ export async function getMultipartPartUrls(
|
||||
} else {
|
||||
blobServiceClient = await getBlobServiceClient()
|
||||
containerName = BLOB_CONFIG.containerName
|
||||
const credentials = getAccountCredentials()
|
||||
accountName = credentials.accountName
|
||||
accountKey = credentials.accountKey
|
||||
accountName = BLOB_CONFIG.accountName
|
||||
accountKey =
|
||||
BLOB_CONFIG.accountKey ||
|
||||
(() => {
|
||||
throw new Error('AZURE_ACCOUNT_KEY is required')
|
||||
})()
|
||||
}
|
||||
|
||||
const containerClient = blobServiceClient.getContainerClient(containerName)
|
||||
@@ -537,10 +501,12 @@ export async function completeMultipartUpload(
|
||||
const containerClient = blobServiceClient.getContainerClient(containerName)
|
||||
const blockBlobClient = containerClient.getBlockBlobClient(key)
|
||||
|
||||
// Sort parts by part number and extract block IDs
|
||||
const sortedBlockIds = parts
|
||||
.sort((a, b) => a.partNumber - b.partNumber)
|
||||
.map((part) => part.blockId)
|
||||
|
||||
// Commit the block list to create the final blob
|
||||
await blockBlobClient.commitBlockList(sortedBlockIds, {
|
||||
metadata: {
|
||||
multipartUpload: 'completed',
|
||||
@@ -591,8 +557,10 @@ export async function abortMultipartUpload(key: string, customConfig?: BlobConfi
|
||||
const blockBlobClient = containerClient.getBlockBlobClient(key)
|
||||
|
||||
try {
|
||||
// Delete the blob if it exists (this also cleans up any uncommitted blocks)
|
||||
await blockBlobClient.deleteIfExists()
|
||||
} catch (error) {
|
||||
// Ignore errors since we're just cleaning up
|
||||
logger.warn('Error cleaning up multipart upload:', error)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -618,6 +618,13 @@ export function getToolOutputs(
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Generates output paths for a tool-based block.
|
||||
*
|
||||
* @param blockConfig - The block configuration containing tools config
|
||||
* @param subBlocks - SubBlock values for tool selection and condition evaluation
|
||||
* @returns Array of output paths for the tool, or empty array on error
|
||||
*/
|
||||
export function getToolOutputPaths(
|
||||
blockConfig: BlockConfig,
|
||||
subBlocks?: Record<string, SubBlockWithValue>
|
||||
@@ -627,22 +634,12 @@ export function getToolOutputPaths(
  if (!outputs || Object.keys(outputs).length === 0) return []

  if (subBlocks && blockConfig.outputs) {
    const filteredBlockOutputs = filterOutputsByCondition(blockConfig.outputs, subBlocks)
    const allowedKeys = new Set(Object.keys(filteredBlockOutputs))

    const filteredOutputs: Record<string, any> = {}

    for (const [key, value] of Object.entries(outputs)) {
      const blockOutput = blockConfig.outputs[key]

      if (!blockOutput || typeof blockOutput !== 'object') {
        filteredOutputs[key] = value
        continue
      }

      const condition = 'condition' in blockOutput ? blockOutput.condition : undefined
      if (condition) {
        if (evaluateOutputCondition(condition, subBlocks)) {
          filteredOutputs[key] = value
        }
      } else {
        if (allowedKeys.has(key)) {
          filteredOutputs[key] = value
        }
      }

@@ -26,7 +26,7 @@ describe('VariableManager', () => {
  it.concurrent('should handle boolean type variables', () => {
    expect(VariableManager.parseInputForStorage('true', 'boolean')).toBe(true)
    expect(VariableManager.parseInputForStorage('false', 'boolean')).toBe(false)
    expect(VariableManager.parseInputForStorage('1', 'boolean')).toBe(false)
    expect(VariableManager.parseInputForStorage('1', 'boolean')).toBe(true)
    expect(VariableManager.parseInputForStorage('0', 'boolean')).toBe(false)
    expect(VariableManager.parseInputForStorage('"true"', 'boolean')).toBe(true)
    expect(VariableManager.parseInputForStorage("'false'", 'boolean')).toBe(false)
@@ -128,7 +128,7 @@ describe('VariableManager', () => {
    expect(VariableManager.resolveForExecution(false, 'boolean')).toBe(false)
    expect(VariableManager.resolveForExecution('true', 'boolean')).toBe(true)
    expect(VariableManager.resolveForExecution('false', 'boolean')).toBe(false)
    expect(VariableManager.resolveForExecution('1', 'boolean')).toBe(false)
    expect(VariableManager.resolveForExecution('1', 'boolean')).toBe(true)
    expect(VariableManager.resolveForExecution('0', 'boolean')).toBe(false)
  })


@@ -61,7 +61,7 @@ export class VariableManager {
        // Special case for 'anything else' in the test
        if (unquoted === 'anything else') return true
        const normalized = String(unquoted).toLowerCase().trim()
        return normalized === 'true'
        return normalized === 'true' || normalized === '1'
      }

      case 'object':

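The behavioral change in this hunk is small but test-visible: `'1'` now coerces to `true`, matching the updated expectations above. A self-contained sketch of the coercion path as the visible lines describe it (the unquoting step is an assumption inferred from the quoted-string test cases):

```ts
// Hedged sketch of the boolean coercion after this change; the surrounding
// VariableManager code (unquoting, the 'anything else' special case) is
// reconstructed from the visible diff lines.
function coerceBoolean(raw: string): boolean {
  const unquoted = raw.replace(/^['"]|['"]$/g, '')
  if (unquoted === 'anything else') return true // special case kept from the source
  const normalized = String(unquoted).toLowerCase().trim()
  return normalized === 'true' || normalized === '1'
}

coerceBoolean('true')    // true
coerceBoolean("'false'") // false
coerceBoolean('1')       // true  (previously false)
coerceBoolean('0')       // false
```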
@@ -24,7 +24,7 @@
  },
  "dependencies": {
    "@a2a-js/sdk": "0.3.7",
    "@anthropic-ai/sdk": "0.71.2",
    "@anthropic-ai/sdk": "^0.39.0",
    "@aws-sdk/client-bedrock-runtime": "3.940.0",
    "@aws-sdk/client-dynamodb": "3.940.0",
    "@aws-sdk/client-rds-data": "3.940.0",

@@ -1,5 +1,4 @@
import Anthropic from '@anthropic-ai/sdk'
import { transformJSONSchema } from '@anthropic-ai/sdk/lib/transform-json-schema'
import { createLogger } from '@sim/logger'
import type { StreamingExecution } from '@/executor/types'
import { MAX_TOOL_ITERATIONS } from '@/providers'
@@ -186,10 +185,13 @@ export const anthropicProvider: ProviderConfig = {
      const schema = request.responseFormat.schema || request.responseFormat

      if (useNativeStructuredOutputs) {
        const transformedSchema = transformJSONSchema(schema)
        const schemaWithConstraints = {
          ...schema,
          additionalProperties: false,
        }
        payload.output_format = {
          type: 'json_schema',
          schema: transformedSchema,
          schema: schemaWithConstraints,
        }
        logger.info(`Using native structured outputs for model: ${modelId}`)
      } else {

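This hunk swaps the SDK's `transformJSONSchema` helper for a plain spread that pins `additionalProperties: false` on the user-supplied schema before it is handed to the structured-output payload. A sketch of the resulting payload shape, with a stand-in schema (in the real code `request.responseFormat` supplies it, and the payload fields are exactly those visible in the diff):

```ts
// Sketch of the output_format payload after this change. The schema literal
// is a stand-in; request.responseFormat supplies it in the real code.
const schema = {
  type: 'object',
  properties: { answer: { type: 'string' } },
  required: ['answer'],
}

// Spreading the schema and pinning additionalProperties: false keeps the
// model from emitting keys outside the declared properties.
const schemaWithConstraints = { ...schema, additionalProperties: false }

const payload = {
  output_format: {
    type: 'json_schema' as const,
    schema: schemaWithConstraints,
  },
}
```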
@@ -35,23 +35,4 @@ export const useExecutionStore = create<ExecutionState & ExecutionActions>()((se
  },
  clearRunPath: () => set({ lastRunPath: new Map(), lastRunEdges: new Map() }),
  reset: () => set(initialState),

  setLastExecutionSnapshot: (workflowId, snapshot) => {
    const { lastExecutionSnapshots } = get()
    const newSnapshots = new Map(lastExecutionSnapshots)
    newSnapshots.set(workflowId, snapshot)
    set({ lastExecutionSnapshots: newSnapshots })
  },

  getLastExecutionSnapshot: (workflowId) => {
    const { lastExecutionSnapshots } = get()
    return lastExecutionSnapshots.get(workflowId)
  },

  clearLastExecutionSnapshot: (workflowId) => {
    const { lastExecutionSnapshots } = get()
    const newSnapshots = new Map(lastExecutionSnapshots)
    newSnapshots.delete(workflowId)
    set({ lastExecutionSnapshots: newSnapshots })
  },
}))

@@ -1,5 +1,4 @@
import type { Executor } from '@/executor'
import type { SerializableExecutionState } from '@/executor/execution/types'
import type { ExecutionContext } from '@/executor/types'

/**
@@ -29,11 +28,6 @@ export interface ExecutionState {
   * Cleared when a new run starts. Used to show run path indicators on edges.
   */
  lastRunEdges: Map<string, EdgeRunStatus>
  /**
   * Stores the last successful execution snapshot per workflow.
   * Used for run-from-block functionality.
   */
  lastExecutionSnapshots: Map<string, SerializableExecutionState>
}

export interface ExecutionActions {
@@ -47,18 +41,6 @@ export interface ExecutionActions {
  setEdgeRunStatus: (edgeId: string, status: EdgeRunStatus) => void
  clearRunPath: () => void
  reset: () => void
  /**
   * Store the execution snapshot for a workflow after successful execution.
   */
  setLastExecutionSnapshot: (workflowId: string, snapshot: SerializableExecutionState) => void
  /**
   * Get the last execution snapshot for a workflow.
   */
  getLastExecutionSnapshot: (workflowId: string) => SerializableExecutionState | undefined
  /**
   * Clear the execution snapshot for a workflow.
   */
  clearLastExecutionSnapshot: (workflowId: string) => void
}

export const initialState: ExecutionState = {
@@ -70,5 +52,4 @@ export const initialState: ExecutionState = {
  debugContext: null,
  lastRunPath: new Map(),
  lastRunEdges: new Map(),
  lastExecutionSnapshots: new Map(),
}

@@ -27,9 +27,6 @@ export function registerEmitFunctions(
  emitSubblockUpdate = subblockEmit
  emitVariableUpdate = variableEmit
  currentRegisteredWorkflowId = workflowId
  if (workflowId) {
    useOperationQueueStore.getState().processNextOperation()
  }
}

let currentRegisteredWorkflowId: string | null = null
@@ -265,14 +262,16 @@ export const useOperationQueueStore = create<OperationQueueState>((set, get) =>
      return
    }

    if (!currentRegisteredWorkflowId) {
    const nextOperation = currentRegisteredWorkflowId
      ? state.operations.find(
          (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
        )
      : state.operations.find((op) => op.status === 'pending')
    if (!nextOperation) {
      return
    }

    const nextOperation = state.operations.find(
      (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
    )
    if (!nextOperation) {
    if (currentRegisteredWorkflowId && nextOperation.workflowId !== currentRegisteredWorkflowId) {
      return
    }


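Reading past the interleaved old/new lines, the new selection logic appears to scope the queue to the registered workflow when one exists, and otherwise fall back to any pending operation. A hedged consolidation of that reading, with the store types assumed for illustration:

```ts
// Hedged consolidation of the next-operation selection as this hunk reads:
// scope to the registered workflow when set, else take any pending op.
// QueuedOperation is an assumed shape, not the store's real type.
interface QueuedOperation {
  status: 'pending' | 'processing' | 'done'
  workflowId: string
}

function selectNextOperation(
  operations: QueuedOperation[],
  currentRegisteredWorkflowId: string | null
): QueuedOperation | undefined {
  return currentRegisteredWorkflowId
    ? operations.find(
        (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
      )
    : operations.find((op) => op.status === 'pending')
}
```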
@@ -38,12 +38,11 @@ export const storageUploadTool: ToolConfig<
      visibility: 'user-or-llm',
      description: 'Optional folder path (e.g., "folder/subfolder/")',
    },
    fileData: {
      type: 'json',
    fileContent: {
      type: 'string',
      required: true,
      visibility: 'user-or-llm',
      description:
        'File to upload - UserFile object (basic mode) or string content (advanced mode: base64 or plain text). Supports data URLs.',
      description: 'The file content (base64 encoded for binary files, or plain text)',
    },
    contentType: {
      type: 'string',
@@ -66,28 +65,65 @@ export const storageUploadTool: ToolConfig<
  },

  request: {
    url: '/api/tools/supabase/storage-upload',
    url: (params) => {
      // Combine folder path and fileName, ensuring proper formatting
      let fullPath = params.fileName
      if (params.path) {
        // Ensure path ends with / and doesn't have double slashes
        const folderPath = params.path.endsWith('/') ? params.path : `${params.path}/`
        fullPath = `${folderPath}${params.fileName}`
      }
      return `https://${params.projectId}.supabase.co/storage/v1/object/${params.bucket}/${fullPath}`
    },
    method: 'POST',
    headers: () => ({
      'Content-Type': 'application/json',
    }),
    body: (params) => ({
      projectId: params.projectId,
      apiKey: params.apiKey,
      bucket: params.bucket,
      fileName: params.fileName,
      path: params.path,
      fileData: params.fileData,
      contentType: params.contentType,
      upsert: params.upsert,
    }),
    headers: (params) => {
      const headers: Record<string, string> = {
        apikey: params.apiKey,
        Authorization: `Bearer ${params.apiKey}`,
      }

      if (params.contentType) {
        headers['Content-Type'] = params.contentType
      }

      if (params.upsert) {
        headers['x-upsert'] = 'true'
      }

      return headers
    },
    body: (params) => {
      // Return the file content wrapped in an object
      // The actual upload will need to handle this appropriately
      return {
        content: params.fileContent,
      }
    },
  },

  transformResponse: async (response: Response) => {
    let data
    try {
      data = await response.json()
    } catch (parseError) {
      throw new Error(`Failed to parse Supabase storage upload response: ${parseError}`)
    }

    return {
      success: true,
      output: {
        message: 'Successfully uploaded file to storage',
        results: data,
      },
      error: undefined,
    }
  },

  outputs: {
    message: { type: 'string', description: 'Operation status message' },
    results: {
      type: 'object',
      description: 'Upload result including file path, bucket, and public URL',
      description: 'Upload result including file path and metadata',
    },
  },
}

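For reference, the URL and headers built above target Supabase's Storage object endpoint directly rather than the internal proxy route. A sketch of the equivalent raw request (project ref, bucket, and object key are placeholders; note the tool's `body` function wraps the content in `{ content }`, which, per its own comment, the upload layer still has to unwrap into the raw body the Storage API expects):

```ts
// Hypothetical raw equivalent of the request the tool config assembles above.
// Placeholders: my-project (project ref), avatars (bucket), SUPABASE_KEY.
const SUPABASE_KEY = process.env.SUPABASE_SERVICE_ROLE_KEY ?? ''

async function uploadToStorage(fileBytes: Uint8Array): Promise<unknown> {
  const res = await fetch(
    'https://my-project.supabase.co/storage/v1/object/avatars/folder/photo.png',
    {
      method: 'POST',
      headers: {
        apikey: SUPABASE_KEY,
        Authorization: `Bearer ${SUPABASE_KEY}`,
        'Content-Type': 'image/png',
        'x-upsert': 'true', // overwrite an existing object at the same path
      },
      body: fileBytes, // raw bytes, not a JSON wrapper
    }
  )
  if (!res.ok) throw new Error(`Upload failed: ${res.status}`)
  return res.json() // e.g. an object containing the stored key
}
```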
@@ -136,7 +136,7 @@ export interface SupabaseStorageUploadParams {
  bucket: string
  fileName: string
  path?: string
  fileData: any // UserFile object (basic mode) or string (advanced mode: base64/plain text)
  fileContent: string
  contentType?: string
  upsert?: boolean
}

37
bun.lock
@@ -1,5 +1,6 @@
{
  "lockfileVersion": 1,
  "configVersion": 0,
  "workspaces": {
    "": {
      "name": "simstudio",
@@ -54,7 +55,7 @@
      "version": "0.1.0",
      "dependencies": {
        "@a2a-js/sdk": "0.3.7",
        "@anthropic-ai/sdk": "0.71.2",
        "@anthropic-ai/sdk": "^0.39.0",
        "@aws-sdk/client-bedrock-runtime": "3.940.0",
        "@aws-sdk/client-dynamodb": "3.940.0",
        "@aws-sdk/client-rds-data": "3.940.0",
@@ -362,7 +363,7 @@

    "@ampproject/remapping": ["@ampproject/remapping@2.3.0", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw=="],

    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.71.2", "", { "dependencies": { "json-schema-to-ts": "^3.1.1" }, "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["zod"], "bin": { "anthropic-ai-sdk": "bin/cli" } }, "sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ=="],
    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.39.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" } }, "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg=="],

    "@ark/schema": ["@ark/schema@0.56.0", "", { "dependencies": { "@ark/util": "0.56.0" } }, "sha512-ECg3hox/6Z/nLajxXqNhgPtNdHWC9zNsDyskwO28WinoFEnWow4IsERNz9AnXRhTZJnYIlAJ4uGn3nlLk65vZA=="],

@@ -546,8 +547,6 @@

    "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="],

    "@babel/runtime": ["@babel/runtime@7.28.6", "", {}, "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA=="],

    "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="],

    "@babel/traverse": ["@babel/traverse@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/generator": "^7.28.6", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/types": "^7.28.6", "debug": "^4.3.1" } }, "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg=="],
@@ -2444,8 +2443,6 @@

    "json-schema": ["json-schema@0.4.0", "", {}, "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="],

    "json-schema-to-ts": ["json-schema-to-ts@3.1.1", "", { "dependencies": { "@babel/runtime": "^7.18.3", "ts-algebra": "^2.0.0" } }, "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g=="],

    "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],

    "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="],
@@ -3390,8 +3387,6 @@

    "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="],

    "ts-algebra": ["ts-algebra@2.0.0", "", {}, "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw=="],

    "ts-interface-checker": ["ts-interface-checker@0.1.13", "", {}, "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="],

    "tsafe": ["tsafe@1.8.12", "", {}, "sha512-nFRqW0ttu/2o6XTXsHiVZWJBCOaxhVqZLg7dgs3coZNsCMPXPfwz+zPHAQA+70fNnVJLAPg1EgGIqK9Q84tvAw=="],
@@ -3598,6 +3593,10 @@

    "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],

    "@anthropic-ai/sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],

    "@anthropic-ai/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],

    "@asamuzakjp/css-color/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],

    "@aws-crypto/crc32/@aws-sdk/types": ["@aws-sdk/types@3.969.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-7IIzM5TdiXn+VtgPdVLjmE6uUBUtnga0f4RiSEI1WW10RPuNvZ9U+pL3SwDiRDAdoGrOF9tSLJOFZmfuwYuVYQ=="],
@@ -3714,8 +3713,6 @@

    "@browserbasehq/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.39.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" } }, "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg=="],

    "@cerebras/cerebras_cloud_sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],

    "@cerebras/cerebras_cloud_sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
@@ -4218,6 +4215,10 @@

    "xml2js/xmlbuilder": ["xmlbuilder@11.0.1", "", {}, "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="],

    "@anthropic-ai/sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],

    "@anthropic-ai/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],

    "@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],

    "@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
@@ -4274,10 +4275,6 @@

    "@browserbasehq/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],

    "@cerebras/cerebras_cloud_sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],

    "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
@@ -4688,6 +4685,10 @@

    "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="],

    "@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],

    "@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],

    "@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],

    "@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],
@@ -4736,10 +4737,6 @@

    "@browserbasehq/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],

    "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],

    "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
@@ -4832,10 +4829,6 @@

    "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],

    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],

    "@trigger.dev/core/socket.io/engine.io/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="],

    "lint-staged/listr2/cli-truncate/string-width/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="],
