Mirror of https://github.com/simstudioai/sim.git (synced 2026-01-26 07:18:38 -05:00)

Compare commits: fix/hackat...v0.5.69 (36 commits)
Commit SHAs:

2b74a2626a
e9c4251c1c
cc2be33d6b
45371e521e
0ce0f98aa5
dff1c9d083
b09f683072
a8bb0db660
af82820a28
4372841797
5e8c843241
7bf3d73ee6
7ffc11a738
be578e2ed7
f415e5edc4
13a6e6c3fa
f5ab7f21ae
bfb6fffe38
4fbec0a43f
585f5e365b
3792bdd252
eb5d1f3e5b
54ab82c8dd
f895bf469b
dd3209af06
b6ba3b50a7
b304233062
57e4b49bd6
e12dd204ed
3d9d9cbc54
0f4ec962ad
4827866f9a
3e697d9ed9
4431a1a484
4d1a9a3f22
eb07a080fb
.github/workflows/ci.yml (vendored): 5 changed lines
@@ -27,11 +27,10 @@ jobs:
     steps:
       - name: Extract version from commit message
         id: extract
-        env:
-          COMMIT_MSG: ${{ github.event.head_commit.message }}
         run: |
+          COMMIT_MSG="${{ github.event.head_commit.message }}"
           # Only tag versions on main branch
-          if [ "$GITHUB_REF" = "refs/heads/main" ] && [[ "$COMMIT_MSG" =~ ^(v[0-9]+\.[0-9]+\.[0-9]+): ]]; then
+          if [ "${{ github.ref }}" = "refs/heads/main" ] && [[ "$COMMIT_MSG" =~ ^(v[0-9]+\.[0-9]+\.[0-9]+): ]]; then
            VERSION="${BASH_REMATCH[1]}"
            echo "version=${VERSION}" >> $GITHUB_OUTPUT
            echo "is_release=true" >> $GITHUB_OUTPUT
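For reference, the release-tagging step above fires only on main and only when the head commit message starts with a semver prefix like `v0.5.69:`. A minimal sketch of the same match in TypeScript (the helper name is illustrative, not part of the repo):

```ts
// Mirrors the workflow's bash regex: ^(v[0-9]+\.[0-9]+\.[0-9]+):
function extractReleaseVersion(commitMsg: string, ref: string): string | null {
  if (ref !== 'refs/heads/main') return null
  const match = /^(v\d+\.\d+\.\d+):/.exec(commitMsg)
  return match ? match[1] : null // "v0.5.69: fix docs" -> "v0.5.69"
}
```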
@@ -119,19 +119,6 @@ aside#nd-sidebar {
   }
 }
 
-/* Hide TOC popover on tablet/medium screens (768px - 1279px) */
-/* Keeps it visible on mobile (<768px) for easy navigation */
-/* Desktop (>=1280px) already hides it via fumadocs xl:hidden */
-@media (min-width: 768px) and (max-width: 1279px) {
-  #nd-docs-layout {
-    --fd-toc-popover-height: 0px !important;
-  }
-
-  [data-toc-popover] {
-    display: none !important;
-  }
-}
-
 /* Desktop only: Apply custom navbar offset, sidebar width and margin offsets */
 /* On mobile, let fumadocs handle the layout natively */
 @media (min-width: 1024px) {
@@ -5,24 +5,44 @@ title: Copilot
 import { Callout } from 'fumadocs-ui/components/callout'
 import { Card, Cards } from 'fumadocs-ui/components/card'
 import { Image } from '@/components/ui/image'
-import { MessageCircle, Hammer, Zap, Globe, Paperclip, History, RotateCcw, Brain } from 'lucide-react'
+import { MessageCircle, Package, Zap, Infinity as InfinityIcon, Brain, BrainCircuit } from 'lucide-react'
 
-Copilot is your in-editor assistant that helps you build and edit workflows. It can:
+Copilot is your in-editor assistant that helps you build and edit workflows with Sim Copilot, as well as understand and improve them. It can:
 
 - **Explain**: Answer questions about Sim and your current workflow
 - **Guide**: Suggest edits and best practices
-- **Build**: Add blocks, wire connections, and configure settings
-- **Debug**: Analyze execution issues and optimize performance
+- **Edit**: Make changes to blocks, connections, and settings when you approve
 
 <Callout type="info">
-  Copilot is a Sim-managed service. For self-hosted deployments:
+  Copilot is a Sim-managed service. For self-hosted deployments, generate a Copilot API key in the hosted app (sim.ai → Settings → Copilot)
   1. Go to [sim.ai](https://sim.ai) → Settings → Copilot and generate a Copilot API key
-  2. Set `COPILOT_API_KEY` in your self-hosted environment
+  2. Set `COPILOT_API_KEY` in your self-hosted environment to that value
 </Callout>
 
-## Modes
+## Context Menu (@)
 
-Switch between modes using the mode selector at the bottom of the input area.
+Use the `@` symbol to reference various resources and give Copilot more context about your workspace:
+
+<Image
+  src="/static/copilot/copilot-menu.png"
+  alt="Copilot context menu showing available reference options"
+  width={600}
+  height={400}
+/>
+
+The `@` menu provides access to:
+- **Chats**: Reference previous copilot conversations
+- **All workflows**: Reference any workflow in your workspace
+- **Workflow Blocks**: Reference specific blocks from workflows
+- **Blocks**: Reference block types and templates
+- **Knowledge**: Reference your uploaded documents and knowledgebase
+- **Docs**: Reference Sim documentation
+- **Templates**: Reference workflow templates
+- **Logs**: Reference execution logs and results
+
+This contextual information helps Copilot provide more accurate and relevant assistance for your specific use case.
+
+## Modes
 
 <Cards>
   <Card
@@ -40,153 +60,113 @@ Switch between modes using the mode selector at the bottom of the input area.
   <Card
     title={
       <span className="inline-flex items-center gap-2">
-        <Hammer className="h-4 w-4 text-muted-foreground" />
-        Build
+        <Package className="h-4 w-4 text-muted-foreground" />
+        Agent
       </span>
     }
   >
     <div className="m-0 text-sm">
-      Workflow building mode. Copilot can add blocks, wire connections, edit configurations, and debug issues.
+      Build-and-edit mode. Copilot proposes specific edits (add blocks, wire variables, tweak settings) and applies them when you approve.
     </div>
   </Card>
 </Cards>
 
-## Models
+<div className="flex justify-center">
+  <Image
+    src="/static/copilot/copilot-mode.png"
+    alt="Copilot mode selection interface"
+    width={600}
+    height={400}
+    className="my-6"
+  />
+</div>
 
-Select your preferred AI model using the model selector at the bottom right of the input area.
+## Depth Levels
 
-**Available Models:**
-- Claude 4.5 Opus, Sonnet (default), Haiku
-- GPT 5.2 Codex, Pro
-- Gemini 3 Pro
+<Cards>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <Zap className="h-4 w-4 text-muted-foreground" />
+        Fast
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Quickest and cheapest. Best for small edits, simple workflows, and minor tweaks.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <InfinityIcon className="h-4 w-4 text-muted-foreground" />
+        Auto
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Balanced speed and reasoning. Recommended default for most tasks.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <Brain className="h-4 w-4 text-muted-foreground" />
+        Advanced
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">More reasoning for larger workflows and complex edits while staying performant.</div>
+  </Card>
+  <Card
+    title={
+      <span className="inline-flex items-center gap-2">
+        <BrainCircuit className="h-4 w-4 text-muted-foreground" />
+        Behemoth
+      </span>
+    }
+  >
+    <div className="m-0 text-sm">Maximum reasoning for deep planning, debugging, and complex architectural changes.</div>
+  </Card>
+</Cards>
 
-Choose based on your needs: faster models for simple tasks, more capable models for complex workflows.
+### Mode Selection Interface
 
-## Context Menu (@)
+You can easily switch between different reasoning modes using the mode selector in the Copilot interface:
 
-Use the `@` symbol to reference resources and give Copilot more context:
+<Image
+  src="/static/copilot/copilot-models.png"
+  alt="Copilot mode selection showing Advanced mode with MAX toggle"
+  width={600}
+  height={300}
+/>
 
-| Reference | Description |
-|-----------|-------------|
-| **Chats** | Previous copilot conversations |
-| **Workflows** | Any workflow in your workspace |
-| **Workflow Blocks** | Blocks in the current workflow |
-| **Blocks** | Block types and templates |
-| **Knowledge** | Uploaded documents and knowledge bases |
-| **Docs** | Sim documentation |
-| **Templates** | Workflow templates |
-| **Logs** | Execution logs and results |
+The interface allows you to:
+- **Select reasoning level**: Choose from Fast, Auto, Advanced, or Behemoth
+- **Enable MAX mode**: Toggle for maximum reasoning capabilities when you need the most thorough analysis
+- **See mode descriptions**: Understand what each mode is optimized for
 
-Type `@` in the input field to open the context menu, then search or browse to find what you need.
+Choose your mode based on the complexity of your task - use Fast for simple questions and Behemoth for complex architectural changes.
 
-## Slash Commands (/)
+## Billing and Cost Calculation
 
-Use slash commands for quick actions:
+### How Costs Are Calculated
 
-| Command | Description |
-|---------|-------------|
-| `/fast` | Fast mode execution |
-| `/research` | Research and exploration mode |
-| `/actions` | Execute agent actions |
+Copilot usage is billed per token from the underlying LLM:
 
-**Web Commands:**
+- **Input tokens**: billed at the provider's base rate (**at-cost**)
+- **Output tokens**: billed at **1.5×** the provider's base output rate
 
-| Command | Description |
-|---------|-------------|
-| `/search` | Search the web |
-| `/read` | Read a specific URL |
-| `/scrape` | Scrape web page content |
-| `/crawl` | Crawl multiple pages |
+```javascript
+copilotCost = (inputTokens × inputPrice + outputTokens × (outputPrice × 1.5)) / 1,000,000
+```
 
-Type `/` in the input field to see available commands.
+| Component | Rate Applied |
+|----------|----------------------|
+| Input | inputPrice |
+| Output | outputPrice × 1.5 |
 
-## Chat Management
-
-### Starting a New Chat
+<Callout type="warning">
+  Pricing shown reflects rates as of September 4, 2025. Check provider documentation for current pricing.
+</Callout>
 
-Click the **+** button in the Copilot header to start a fresh conversation.
-
-### Chat History
-
-Click **History** to view previous conversations grouped by date. You can:
-- Click a chat to resume it
-- Delete chats you no longer need
-
-### Editing Messages
-
-Hover over any of your messages and click **Edit** to modify and resend it. This is useful for refining your prompts.
-
-### Message Queue
-
-If you send a message while Copilot is still responding, it gets queued. You can:
-- View queued messages in the expandable queue panel
-- Send a queued message immediately (aborts current response)
-- Remove messages from the queue
-
-## File Attachments
-
-Click the attachment icon to upload files with your message. Supported file types include:
-- Images (preview thumbnails shown)
-- PDFs
-- Text files, JSON, XML
-- Other document formats
-
-Files are displayed as clickable thumbnails that open in a new tab.
-
-## Checkpoints & Changes
-
-When Copilot makes changes to your workflow, it saves checkpoints so you can revert if needed.
-
-### Viewing Checkpoints
-
-Hover over a Copilot message and click the checkpoints icon to see saved workflow states for that message.
-
-### Reverting Changes
-
-Click **Revert** on any checkpoint to restore your workflow to that state. A confirmation dialog will warn that this action cannot be undone.
-
-### Accepting Changes
-
-When Copilot proposes changes, you can:
-- **Accept**: Apply the proposed changes (`Mod+Shift+Enter`)
-- **Reject**: Dismiss the changes and keep your current workflow
-
-## Thinking Blocks
-
-For complex requests, Copilot may show its reasoning process in expandable thinking blocks:
-
-- Blocks auto-expand while Copilot is thinking
-- Click to manually expand/collapse
-- Shows duration of the thinking process
-- Helps you understand how Copilot arrived at its solution
-
-## Options Selection
-
-When Copilot presents multiple options, you can select using:
-
-| Control | Action |
-|---------|--------|
-| **1-9** | Select option by number |
-| **Arrow Up/Down** | Navigate between options |
-| **Enter** | Select highlighted option |
-
-Selected options are highlighted; unselected options appear struck through.
-
-## Keyboard Shortcuts
-
-| Shortcut | Action |
-|----------|--------|
-| `@` | Open context menu |
-| `/` | Open slash commands |
-| `Arrow Up/Down` | Navigate menu items |
-| `Enter` | Select menu item |
-| `Esc` | Close menus |
-| `Mod+Shift+Enter` | Accept Copilot changes |
-
-## Usage Limits
-
-Copilot usage is billed per token from the underlying LLM. If you reach your usage limit, Copilot will prompt you to increase your limit. You can add usage in increments ($50, $100) from your current base.
 
 <Callout type="info">
-  See the [Cost Calculation page](/execution/costs) for billing details.
+  Model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. See <a href="/execution/costs">the Cost Calculation page</a> for background and examples.
 </Callout>
 
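To make the billing formula in the hunk above concrete: prices are quoted per million tokens, input is billed at cost, and output carries a 1.5× multiplier. A worked sketch in TypeScript; the rates used here are made-up placeholders, not Sim's actual pricing:

```ts
// copilotCost = (inputTokens * inputPrice + outputTokens * (outputPrice * 1.5)) / 1,000,000
function copilotCost(
  inputTokens: number,
  outputTokens: number,
  inputPricePerM: number,
  outputPricePerM: number
): number {
  return (inputTokens * inputPricePerM + outputTokens * outputPricePerM * 1.5) / 1_000_000
}

// Hypothetical rates of $3/M input and $15/M output:
// 10,000 input + 2,000 output tokens -> (30,000 + 45,000) / 1,000,000 = $0.075
console.log(copilotCost(10_000, 2_000, 3, 15))
```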
@@ -34,8 +34,6 @@ Speed up your workflow building with these keyboard shortcuts and mouse controls
 | `Mod` + `V` | Paste blocks |
 | `Delete` or `Backspace` | Delete selected blocks or edges |
 | `Shift` + `L` | Auto-layout canvas |
-| `Mod` + `Shift` + `F` | Fit to view |
-| `Mod` + `Shift` + `Enter` | Accept Copilot changes |
 
 ## Panel Navigation
 
@@ -3,7 +3,6 @@
   "pages": [
     "./introduction/index",
     "./getting-started/index",
-    "./quick-reference/index",
     "triggers",
     "blocks",
     "tools",
@@ -1,136 +0,0 @@
----
-title: Quick Reference
-description: Essential actions for navigating and using the Sim workflow editor
----
-
-import { Callout } from 'fumadocs-ui/components/callout'
-
-A quick lookup for everyday actions in the Sim workflow editor. For keyboard shortcuts, see [Keyboard Shortcuts](/keyboard-shortcuts).
-
-<Callout type="info">
-  **Mod** refers to `Cmd` on macOS and `Ctrl` on Windows/Linux.
-</Callout>
-
-## Workspaces
-
-| Action | How |
-|--------|-----|
-| Create a workspace | Click workspace dropdown in sidebar → **New Workspace** |
-| Rename a workspace | Workspace settings → Edit name |
-| Switch workspaces | Click workspace dropdown in sidebar → Select workspace |
-| Invite team members | Workspace settings → **Team** → **Invite** |
-
-## Workflows
-
-| Action | How |
-|--------|-----|
-| Create a workflow | Click **New Workflow** button or `Mod+Shift+A` |
-| Rename a workflow | Double-click workflow name in sidebar, or right-click → **Rename** |
-| Duplicate a workflow | Right-click workflow → **Duplicate** |
-| Reorder workflows | Drag workflow up/down in the sidebar list |
-| Import a workflow | Sidebar menu → **Import** → Select file |
-| Create a folder | Right-click in sidebar → **New Folder** |
-| Rename a folder | Right-click folder → **Rename** |
-| Delete a folder | Right-click folder → **Delete** |
-| Collapse/expand folder | Click folder arrow, or double-click folder |
-| Move workflow to folder | Drag workflow onto folder in sidebar |
-| Delete a workflow | Right-click workflow → **Delete** |
-| Export a workflow | Right-click workflow → **Export** |
-| Assign workflow color | Right-click workflow → **Change Color** |
-| Multi-select workflows | `Mod+Click` or `Shift+Click` workflows in sidebar |
-| Open in new tab | Right-click workflow → **Open in New Tab** |
-
-## Blocks
-
-| Action | How |
-|--------|-----|
-| Add a block | Drag from Toolbar panel, or right-click canvas → **Add Block** |
-| Select a block | Click on the block |
-| Multi-select blocks | `Mod+Click` additional blocks, or right-drag to draw selection box |
-| Move blocks | Drag selected block(s) to new position |
-| Copy blocks | `Mod+C` with blocks selected |
-| Paste blocks | `Mod+V` to paste copied blocks |
-| Duplicate blocks | Right-click → **Duplicate** |
-| Delete blocks | `Delete` or `Backspace` key, or right-click → **Delete** |
-| Rename a block | Click block name in header, or edit in the Editor panel |
-| Enable/Disable a block | Right-click → **Enable/Disable** |
-| Toggle handle orientation | Right-click → **Toggle Handles** |
-| Toggle trigger mode | Right-click trigger block → **Toggle Trigger Mode** |
-| Configure a block | Select block → use Editor panel on right |
-
-## Connections
-
-| Action | How |
-|--------|-----|
-| Create a connection | Drag from output handle to input handle |
-| Delete a connection | Click edge to select → `Delete` key |
-| Use output in another block | Drag connection tag into input field |
-
-## Canvas Navigation
-
-| Action | How |
-|--------|-----|
-| Pan/move canvas | Left-drag on empty space, or scroll/trackpad |
-| Zoom in/out | Scroll wheel or pinch gesture |
-| Auto-layout | `Shift+L` |
-| Draw selection box | Right-drag on empty canvas area |
-
-## Panels & Views
-
-| Action | How |
-|--------|-----|
-| Open Copilot tab | Press `C` or click Copilot tab |
-| Open Toolbar tab | Press `T` or click Toolbar tab |
-| Open Editor tab | Press `E` or click Editor tab |
-| Search toolbar | `Mod+F` |
-| Toggle advanced mode | Click toggle button on input fields |
-| Resize panels | Drag panel edge |
-| Collapse/expand sidebar | Click collapse button on sidebar |
-
-## Running & Testing
-
-| Action | How |
-|--------|-----|
-| Run workflow | Click Play button or `Mod+Enter` |
-| Stop workflow | Click Stop button or `Mod+Enter` while running |
-| Test with chat | Use Chat panel on the right side |
-| Select output to view | Click dropdown in Chat panel → Select block output |
-| Clear chat history | Click clear button in Chat panel |
-| View execution logs | Open terminal panel at bottom, or `Mod+L` |
-| Filter logs by block | Click block filter in terminal |
-| Filter logs by status | Click status filter in terminal |
-| Search logs | Use search field in terminal |
-| Copy log entry | Right-click log entry → **Copy** |
-| Clear terminal | `Mod+D` |
-
-## Deployment
-
-| Action | How |
-|--------|-----|
-| Deploy a workflow | Click **Deploy** button in Deploy tab |
-| Update deployment | Click **Update** when changes are detected |
-| View deployment status | Check status indicator (Live/Update/Deploy) in Deploy tab |
-| Revert deployment | Access previous versions in Deploy tab |
-| Copy webhook URL | Deploy tab → Copy webhook URL |
-| Copy API endpoint | Deploy tab → Copy API endpoint URL |
-| Set up a schedule | Add Schedule trigger block → Configure interval |
-
-## Variables
-
-| Action | How |
-|--------|-----|
-| Add workflow variable | Variables tab → **Add Variable** |
-| Edit workflow variable | Variables tab → Click variable to edit |
-| Delete workflow variable | Variables tab → Click delete icon on variable |
-| Add environment variable | Settings → **Environment Variables** → **Add** |
-| Reference a variable | Use `{{variableName}}` syntax in block inputs |
-
-## Credentials
-
-| Action | How |
-|--------|-----|
-| Add API key | Block credential field → **Add Credential** → Enter API key |
-| Connect OAuth account | Block credential field → **Connect** → Authorize with provider |
-| Manage credentials | Settings → **Credentials** |
-| Remove credential | Settings → **Credentials** → Delete credential |
apps/sim/app/api/organizations/[id]/workspaces/route.ts (new file, 204 lines)
@@ -0,0 +1,204 @@
+import { db } from '@sim/db'
+import { member, permissions, user, workspace } from '@sim/db/schema'
+import { createLogger } from '@sim/logger'
+import { and, eq, or } from 'drizzle-orm'
+import { type NextRequest, NextResponse } from 'next/server'
+import { getSession } from '@/lib/auth'
+
+const logger = createLogger('OrganizationWorkspacesAPI')
+
+/**
+ * GET /api/organizations/[id]/workspaces
+ * Get workspaces related to the organization with optional filtering
+ * Query parameters:
+ * - ?available=true - Only workspaces where user can invite others (admin permissions)
+ * - ?member=userId - Workspaces where specific member has access
+ */
+export async function GET(request: NextRequest, { params }: { params: Promise<{ id: string }> }) {
+  try {
+    const session = await getSession()
+
+    if (!session?.user?.id) {
+      return NextResponse.json({ error: 'Unauthorized' }, { status: 401 })
+    }
+
+    const { id: organizationId } = await params
+    const url = new URL(request.url)
+    const availableOnly = url.searchParams.get('available') === 'true'
+    const memberId = url.searchParams.get('member')
+
+    // Verify user is a member of this organization
+    const memberEntry = await db
+      .select()
+      .from(member)
+      .where(and(eq(member.organizationId, organizationId), eq(member.userId, session.user.id)))
+      .limit(1)
+
+    if (memberEntry.length === 0) {
+      return NextResponse.json(
+        {
+          error: 'Forbidden - Not a member of this organization',
+        },
+        { status: 403 }
+      )
+    }
+
+    const userRole = memberEntry[0].role
+    const hasAdminAccess = ['owner', 'admin'].includes(userRole)
+
+    if (availableOnly) {
+      // Get workspaces where user has admin permissions (can invite others)
+      const availableWorkspaces = await db
+        .select({
+          id: workspace.id,
+          name: workspace.name,
+          ownerId: workspace.ownerId,
+          createdAt: workspace.createdAt,
+          isOwner: eq(workspace.ownerId, session.user.id),
+          permissionType: permissions.permissionType,
+        })
+        .from(workspace)
+        .leftJoin(
+          permissions,
+          and(
+            eq(permissions.entityType, 'workspace'),
+            eq(permissions.entityId, workspace.id),
+            eq(permissions.userId, session.user.id)
+          )
+        )
+        .where(
+          or(
+            // User owns the workspace
+            eq(workspace.ownerId, session.user.id),
+            // User has admin permission on the workspace
+            and(
+              eq(permissions.userId, session.user.id),
+              eq(permissions.entityType, 'workspace'),
+              eq(permissions.permissionType, 'admin')
+            )
+          )
+        )
+
+      // Filter and format the results
+      const workspacesWithInvitePermission = availableWorkspaces
+        .filter((workspace) => {
+          // Include if user owns the workspace OR has admin permission
+          return workspace.isOwner || workspace.permissionType === 'admin'
+        })
+        .map((workspace) => ({
+          id: workspace.id,
+          name: workspace.name,
+          isOwner: workspace.isOwner,
+          canInvite: true, // All returned workspaces have invite permission
+          createdAt: workspace.createdAt,
+        }))
+
+      logger.info('Retrieved available workspaces for organization member', {
+        organizationId,
+        userId: session.user.id,
+        workspaceCount: workspacesWithInvitePermission.length,
+      })
+
+      return NextResponse.json({
+        success: true,
+        data: {
+          workspaces: workspacesWithInvitePermission,
+          totalCount: workspacesWithInvitePermission.length,
+          filter: 'available',
+        },
+      })
+    }
+
+    if (memberId && hasAdminAccess) {
+      // Get workspaces where specific member has access (admin only)
+      const memberWorkspaces = await db
+        .select({
+          id: workspace.id,
+          name: workspace.name,
+          ownerId: workspace.ownerId,
+          isOwner: eq(workspace.ownerId, memberId),
+          permissionType: permissions.permissionType,
+          createdAt: permissions.createdAt,
+        })
+        .from(workspace)
+        .leftJoin(
+          permissions,
+          and(
+            eq(permissions.entityType, 'workspace'),
+            eq(permissions.entityId, workspace.id),
+            eq(permissions.userId, memberId)
+          )
+        )
+        .where(
+          or(
+            // Member owns the workspace
+            eq(workspace.ownerId, memberId),
+            // Member has permissions on the workspace
+            and(eq(permissions.userId, memberId), eq(permissions.entityType, 'workspace'))
+          )
+        )
+
+      const formattedWorkspaces = memberWorkspaces.map((workspace) => ({
+        id: workspace.id,
+        name: workspace.name,
+        isOwner: workspace.isOwner,
+        permission: workspace.permissionType,
+        joinedAt: workspace.createdAt,
+        createdAt: workspace.createdAt,
+      }))
+
+      return NextResponse.json({
+        success: true,
+        data: {
+          workspaces: formattedWorkspaces,
+          totalCount: formattedWorkspaces.length,
+          filter: 'member',
+          memberId,
+        },
+      })
+    }
+
+    // Default: Get all workspaces (basic info only for regular members)
+    if (!hasAdminAccess) {
+      return NextResponse.json({
+        success: true,
+        data: {
+          workspaces: [],
+          totalCount: 0,
+          message: 'Workspace access information is only available to organization admins',
+        },
+      })
+    }
+
+    // For admins: Get summary of all workspaces
+    const allWorkspaces = await db
+      .select({
+        id: workspace.id,
+        name: workspace.name,
+        ownerId: workspace.ownerId,
+        createdAt: workspace.createdAt,
+        ownerName: user.name,
+      })
+      .from(workspace)
+      .leftJoin(user, eq(workspace.ownerId, user.id))
+
+    return NextResponse.json({
+      success: true,
+      data: {
+        workspaces: allWorkspaces,
+        totalCount: allWorkspaces.length,
+        filter: 'all',
+      },
+      userRole,
+      hasAdminAccess,
+    })
+  } catch (error) {
+    logger.error('Failed to get organization workspaces', { error })
+    return NextResponse.json(
+      {
+        error: 'Internal server error',
+      },
+      { status: 500 }
+    )
+  }
+}
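A usage sketch for the endpoint added above, assuming session-cookie auth as implemented by `getSession()` in the route; the function name and call site are illustrative:

```ts
// Fetch only the workspaces the current user can invite others into.
async function getInvitableWorkspaces(organizationId: string) {
  const res = await fetch(`/api/organizations/${organizationId}/workspaces?available=true`, {
    credentials: 'include', // the route authenticates via the caller's session
  })
  if (!res.ok) throw new Error(`Request failed: ${res.status}`)
  const { data } = await res.json()
  // Shape follows the route's `filter: 'available'` response.
  return data.workspaces as Array<{ id: string; name: string; isOwner: boolean; canInvite: boolean }>
}
```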
@@ -1,257 +0,0 @@
-import { createLogger } from '@sim/logger'
-import { type NextRequest, NextResponse } from 'next/server'
-import { z } from 'zod'
-import { checkInternalAuth } from '@/lib/auth/hybrid'
-import { generateRequestId } from '@/lib/core/utils/request'
-import { processSingleFileToUserFile } from '@/lib/uploads/utils/file-utils'
-import { downloadFileFromStorage } from '@/lib/uploads/utils/file-utils.server'
-
-export const dynamic = 'force-dynamic'
-
-const logger = createLogger('SupabaseStorageUploadAPI')
-
-const SupabaseStorageUploadSchema = z.object({
-  projectId: z.string().min(1, 'Project ID is required'),
-  apiKey: z.string().min(1, 'API key is required'),
-  bucket: z.string().min(1, 'Bucket name is required'),
-  fileName: z.string().min(1, 'File name is required'),
-  path: z.string().optional().nullable(),
-  fileData: z.any(),
-  contentType: z.string().optional().nullable(),
-  upsert: z.boolean().optional().default(false),
-})
-
-export async function POST(request: NextRequest) {
-  const requestId = generateRequestId()
-
-  try {
-    const authResult = await checkInternalAuth(request, { requireWorkflowId: false })
-
-    if (!authResult.success) {
-      logger.warn(
-        `[${requestId}] Unauthorized Supabase storage upload attempt: ${authResult.error}`
-      )
-      return NextResponse.json(
-        {
-          success: false,
-          error: authResult.error || 'Authentication required',
-        },
-        { status: 401 }
-      )
-    }
-
-    logger.info(
-      `[${requestId}] Authenticated Supabase storage upload request via ${authResult.authType}`,
-      {
-        userId: authResult.userId,
-      }
-    )
-
-    const body = await request.json()
-    const validatedData = SupabaseStorageUploadSchema.parse(body)
-
-    const fileData = validatedData.fileData
-    const isStringInput = typeof fileData === 'string'
-
-    logger.info(`[${requestId}] Uploading to Supabase Storage`, {
-      bucket: validatedData.bucket,
-      fileName: validatedData.fileName,
-      path: validatedData.path,
-      fileDataType: isStringInput ? 'string' : 'object',
-    })
-
-    if (!fileData) {
-      return NextResponse.json(
-        {
-          success: false,
-          error: 'fileData is required',
-        },
-        { status: 400 }
-      )
-    }
-
-    let uploadBody: Buffer
-    let uploadContentType: string | undefined
-
-    if (isStringInput) {
-      let content = fileData as string
-
-      const dataUrlMatch = content.match(/^data:([^;]+);base64,(.+)$/s)
-      if (dataUrlMatch) {
-        const [, mimeType, base64Data] = dataUrlMatch
-        content = base64Data
-        if (!validatedData.contentType) {
-          uploadContentType = mimeType
-        }
-        logger.info(`[${requestId}] Extracted base64 from data URL (MIME: ${mimeType})`)
-      }
-
-      const cleanedContent = content.replace(/[\s\r\n]/g, '')
-      const isLikelyBase64 = /^[A-Za-z0-9+/]*={0,2}$/.test(cleanedContent)
-
-      if (isLikelyBase64 && cleanedContent.length >= 4) {
-        try {
-          uploadBody = Buffer.from(cleanedContent, 'base64')
-
-          const expectedMinSize = Math.floor(cleanedContent.length * 0.7)
-          const expectedMaxSize = Math.ceil(cleanedContent.length * 0.8)
-
-          if (
-            uploadBody.length >= expectedMinSize &&
-            uploadBody.length <= expectedMaxSize &&
-            uploadBody.length > 0
-          ) {
-            logger.info(
-              `[${requestId}] Decoded base64 content: ${cleanedContent.length} chars -> ${uploadBody.length} bytes`
-            )
-          } else {
-            const reEncoded = uploadBody.toString('base64')
-            if (reEncoded !== cleanedContent) {
-              logger.info(
-                `[${requestId}] Content looked like base64 but re-encoding didn't match, using as plain text`
-              )
-              uploadBody = Buffer.from(content, 'utf-8')
-            } else {
-              logger.info(
-                `[${requestId}] Decoded base64 content (verified): ${uploadBody.length} bytes`
-              )
-            }
-          }
-        } catch (decodeError) {
-          logger.info(
-            `[${requestId}] Failed to decode as base64, using as plain text: ${decodeError}`
-          )
-          uploadBody = Buffer.from(content, 'utf-8')
-        }
-      } else {
-        uploadBody = Buffer.from(content, 'utf-8')
-        logger.info(`[${requestId}] Using content as plain text (${uploadBody.length} bytes)`)
-      }
-
-      uploadContentType =
-        uploadContentType || validatedData.contentType || 'application/octet-stream'
-    } else {
-      const rawFile = fileData
-      logger.info(`[${requestId}] Processing file object: ${rawFile.name || 'unknown'}`)
-
-      let userFile
-      try {
-        userFile = processSingleFileToUserFile(rawFile, requestId, logger)
-      } catch (error) {
-        return NextResponse.json(
-          {
-            success: false,
-            error: error instanceof Error ? error.message : 'Failed to process file',
-          },
-          { status: 400 }
-        )
-      }
-
-      const buffer = await downloadFileFromStorage(userFile, requestId, logger)
-
-      uploadBody = buffer
-      uploadContentType = validatedData.contentType || userFile.type || 'application/octet-stream'
-    }
-
-    let fullPath = validatedData.fileName
-    if (validatedData.path) {
-      const folderPath = validatedData.path.endsWith('/')
-        ? validatedData.path
-        : `${validatedData.path}/`
-      fullPath = `${folderPath}${validatedData.fileName}`
-    }
-
-    const supabaseUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/${validatedData.bucket}/${fullPath}`
-
-    const headers: Record<string, string> = {
-      apikey: validatedData.apiKey,
-      Authorization: `Bearer ${validatedData.apiKey}`,
-      'Content-Type': uploadContentType,
-    }
-
-    if (validatedData.upsert) {
-      headers['x-upsert'] = 'true'
-    }
-
-    logger.info(`[${requestId}] Sending to Supabase: ${supabaseUrl}`, {
-      contentType: uploadContentType,
-      bodySize: uploadBody.length,
-      upsert: validatedData.upsert,
-    })
-
-    const response = await fetch(supabaseUrl, {
-      method: 'POST',
-      headers,
-      body: new Uint8Array(uploadBody),
-    })
-
-    if (!response.ok) {
-      const errorText = await response.text()
-      let errorData
-      try {
-        errorData = JSON.parse(errorText)
-      } catch {
-        errorData = { message: errorText }
-      }
-
-      logger.error(`[${requestId}] Supabase Storage upload failed:`, {
-        status: response.status,
-        statusText: response.statusText,
-        error: errorData,
-      })
-
-      return NextResponse.json(
-        {
-          success: false,
-          error: errorData.message || errorData.error || `Upload failed: ${response.statusText}`,
-          details: errorData,
-        },
-        { status: response.status }
-      )
-    }
-
-    const result = await response.json()
-
-    logger.info(`[${requestId}] File uploaded successfully to Supabase Storage`, {
-      bucket: validatedData.bucket,
-      path: fullPath,
-    })
-
-    const publicUrl = `https://${validatedData.projectId}.supabase.co/storage/v1/object/public/${validatedData.bucket}/${fullPath}`
-
-    return NextResponse.json({
-      success: true,
-      output: {
-        message: 'Successfully uploaded file to storage',
-        results: {
-          ...result,
-          path: fullPath,
-          bucket: validatedData.bucket,
-          publicUrl,
-        },
-      },
-    })
-  } catch (error) {
-    if (error instanceof z.ZodError) {
-      logger.warn(`[${requestId}] Invalid request data`, { errors: error.errors })
-      return NextResponse.json(
-        {
-          success: false,
-          error: 'Invalid request data',
-          details: error.errors,
-        },
-        { status: 400 }
-      )
-    }
-
-    logger.error(`[${requestId}] Error uploading to Supabase Storage:`, error)
-
-    return NextResponse.json(
-      {
-        success: false,
-        error: error instanceof Error ? error.message : 'Internal server error',
-      },
-      { status: 500 }
-    )
-  }
-}
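The deleted route's string handling leaned on a size heuristic: base64 encodes 3 bytes into 4 characters, so a genuine decode lands near 0.75 bytes per character, and the code accepted a decode only inside the 0.7 to 0.8 band or when re-encoding round-tripped exactly. A standalone restatement of that check (names are illustrative):

```ts
function decodeMaybeBase64(content: string): Buffer {
  const cleaned = content.replace(/[\s\r\n]/g, '')
  // Not base64-shaped: treat as plain text.
  if (!/^[A-Za-z0-9+/]*={0,2}$/.test(cleaned) || cleaned.length < 4) {
    return Buffer.from(content, 'utf-8')
  }
  const decoded = Buffer.from(cleaned, 'base64')
  const ratioPlausible =
    decoded.length > 0 &&
    decoded.length >= Math.floor(cleaned.length * 0.7) &&
    decoded.length <= Math.ceil(cleaned.length * 0.8)
  // Accept the decode if the size ratio fits, or if re-encoding reproduces the input.
  if (ratioPlausible || decoded.toString('base64') === cleaned) return decoded
  return Buffer.from(content, 'utf-8')
}
```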
@@ -284,37 +284,22 @@ const renderLabel = (
         </>
       )}
       {showCanonicalToggle && (
-        <Tooltip.Root>
-          <Tooltip.Trigger asChild>
-            <button
-              type='button'
-              className='flex h-[12px] w-[12px] flex-shrink-0 items-center justify-center bg-transparent p-0 disabled:cursor-not-allowed disabled:opacity-50'
-              onClick={canonicalToggle?.onToggle}
-              disabled={canonicalToggleDisabledResolved}
-              aria-label={
-                canonicalToggle?.mode === 'advanced'
-                  ? 'Switch to selector'
-                  : 'Switch to manual ID'
-              }
-            >
-              <ArrowLeftRight
-                className={cn(
-                  '!h-[12px] !w-[12px]',
-                  canonicalToggle?.mode === 'advanced'
-                    ? 'text-[var(--text-primary)]'
-                    : 'text-[var(--text-secondary)]'
-                )}
-              />
-            </button>
-          </Tooltip.Trigger>
-          <Tooltip.Content side='top'>
-            <p>
-              {canonicalToggle?.mode === 'advanced'
-                ? 'Switch to selector'
-                : 'Switch to manual ID'}
-            </p>
-          </Tooltip.Content>
-        </Tooltip.Root>
+        <button
+          type='button'
+          className='flex h-[12px] w-[12px] flex-shrink-0 items-center justify-center bg-transparent p-0 disabled:cursor-not-allowed disabled:opacity-50'
+          onClick={canonicalToggle?.onToggle}
+          disabled={canonicalToggleDisabledResolved}
+          aria-label={canonicalToggle?.mode === 'advanced' ? 'Use selector' : 'Enter manual ID'}
+        >
+          <ArrowLeftRight
+            className={cn(
+              '!h-[12px] !w-[12px]',
+              canonicalToggle?.mode === 'advanced'
+                ? 'text-[var(--text-primary)]'
+                : 'text-[var(--text-secondary)]'
+            )}
+          />
+        </button>
       )}
     </div>
   </div>
@@ -338,11 +323,6 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
   const configEqual =
     prevProps.config.id === nextProps.config.id && prevProps.config.type === nextProps.config.type
 
-  const canonicalToggleEqual =
-    !!prevProps.canonicalToggle === !!nextProps.canonicalToggle &&
-    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
-    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
-
   return (
     prevProps.blockId === nextProps.blockId &&
     configEqual &&
@@ -351,7 +331,8 @@ const arePropsEqual = (prevProps: SubBlockProps, nextProps: SubBlockProps): bool
     prevProps.disabled === nextProps.disabled &&
     prevProps.fieldDiffStatus === nextProps.fieldDiffStatus &&
     prevProps.allowExpandInPreview === nextProps.allowExpandInPreview &&
-    canonicalToggleEqual
+    prevProps.canonicalToggle?.mode === nextProps.canonicalToggle?.mode &&
+    prevProps.canonicalToggle?.disabled === nextProps.canonicalToggle?.disabled
   )
 }
 
@@ -13,11 +13,7 @@ interface UseCanvasContextMenuProps {
 
 /**
  * Hook for managing workflow canvas context menus.
- *
- * Handles right-click events on nodes, pane, and selections with proper multi-select behavior.
- *
- * @param props - Hook configuration
- * @returns Context menu state and handlers
+ * Handles right-click events, menu state, click-outside detection, and block info extraction.
  */
 export function useCanvasContextMenu({ blocks, getNodes, setNodes }: UseCanvasContextMenuProps) {
   const [activeMenu, setActiveMenu] = useState<MenuType>(null)
@@ -50,29 +46,19 @@ export function useCanvasContextMenu({ blocks, getNodes, setNodes }: UseCanvasCo
       event.stopPropagation()
 
       const isMultiSelect = event.shiftKey || event.metaKey || event.ctrlKey
-      const currentSelectedNodes = getNodes().filter((n) => n.selected)
-      const isClickedNodeSelected = currentSelectedNodes.some((n) => n.id === node.id)
+      setNodes((nodes) =>
+        nodes.map((n) => ({
+          ...n,
+          selected: isMultiSelect ? (n.id === node.id ? true : n.selected) : n.id === node.id,
+        }))
+      )
 
-      let nodesToUse: Node[]
-      if (isClickedNodeSelected) {
-        nodesToUse = currentSelectedNodes
-      } else if (isMultiSelect) {
-        nodesToUse = [...currentSelectedNodes, node]
-        setNodes((nodes) =>
-          nodes.map((n) => ({
-            ...n,
-            selected: n.id === node.id ? true : n.selected,
-          }))
-        )
-      } else {
-        nodesToUse = [node]
-        setNodes((nodes) =>
-          nodes.map((n) => ({
-            ...n,
-            selected: n.id === node.id,
-          }))
-        )
-      }
+      const selectedNodes = getNodes().filter((n) => n.selected)
+      const nodesToUse = isMultiSelect
+        ? selectedNodes.some((n) => n.id === node.id)
+          ? selectedNodes
+          : [...selectedNodes, node]
+        : [node]
 
       setPosition({ x: event.clientX, y: event.clientY })
       setSelectedBlocks(nodesToBlockInfos(nodesToUse))
@@ -27,13 +27,18 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
   const [isOpen, setIsOpen] = useState(false)
   const [position, setPosition] = useState<ContextMenuPosition>({ x: 0, y: 0 })
   const menuRef = useRef<HTMLDivElement>(null)
+  // Used to prevent click-outside dismissal when trigger is clicked
   const dismissPreventedRef = useRef(false)
 
+  /**
+   * Handle right-click event
+   */
   const handleContextMenu = useCallback(
     (e: React.MouseEvent) => {
       e.preventDefault()
       e.stopPropagation()
 
+      // Calculate position relative to viewport
       const x = e.clientX
       const y = e.clientY
 
@@ -45,10 +50,17 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
     [onContextMenu]
   )
 
+  /**
+   * Close the context menu
+   */
   const closeMenu = useCallback(() => {
     setIsOpen(false)
   }, [])
 
+  /**
+   * Prevent the next click-outside from dismissing the menu.
+   * Call this on pointerdown of a toggle trigger to allow proper toggle behavior.
+   */
   const preventDismiss = useCallback(() => {
     dismissPreventedRef.current = true
   }, [])
@@ -60,6 +72,7 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
     if (!isOpen) return
 
     const handleClickOutside = (e: MouseEvent) => {
+      // Check if dismissal was prevented (e.g., by toggle trigger's pointerdown)
       if (dismissPreventedRef.current) {
        dismissPreventedRef.current = false
        return
@@ -69,6 +82,7 @@ export function useContextMenu({ onContextMenu }: UseContextMenuProps = {}) {
       }
     }
 
+    // Small delay to prevent immediate close from the same click that opened the menu
     const timeoutId = setTimeout(() => {
       document.addEventListener('click', handleClickOutside)
     }, 0)
@@ -214,6 +214,15 @@ export const A2ABlock: BlockConfig<A2AResponse> = {
     ],
     config: {
       tool: (params) => params.operation as string,
+      params: (params) => {
+        const { fileUpload, fileReference, ...rest } = params
+        const hasFileUpload = Array.isArray(fileUpload) ? fileUpload.length > 0 : !!fileUpload
+        const files = hasFileUpload ? fileUpload : fileReference
+        return {
+          ...rest,
+          ...(files ? { files } : {}),
+        }
+      },
     },
   },
   inputs: {
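The `params` mapper added above prefers an uploaded file over a manual reference when both are present. A condensed restatement of the same behavior as a standalone sketch (types simplified for illustration):

```ts
type A2AParams = { fileUpload?: unknown; fileReference?: unknown; [key: string]: unknown }

function mapA2AParams({ fileUpload, fileReference, ...rest }: A2AParams) {
  // An empty upload array counts as "no upload", falling back to fileReference.
  const hasFileUpload = Array.isArray(fileUpload) ? fileUpload.length > 0 : !!fileUpload
  const files = hasFileUpload ? fileUpload : fileReference
  return { ...rest, ...(files ? { files } : {}) }
}
```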
@@ -661,25 +661,12 @@ Return ONLY the PostgREST filter expression - no explanations, no markdown, no e
       placeholder: 'folder/subfolder/',
       condition: { field: 'operation', value: 'storage_upload' },
     },
-    {
-      id: 'file',
-      title: 'File',
-      type: 'file-upload',
-      canonicalParamId: 'fileData',
-      placeholder: 'Upload file to storage',
-      condition: { field: 'operation', value: 'storage_upload' },
-      mode: 'basic',
-      multiple: false,
-      required: true,
-    },
     {
       id: 'fileContent',
       title: 'File Content',
       type: 'code',
-      canonicalParamId: 'fileData',
       placeholder: 'Base64 encoded for binary files, or plain text',
       condition: { field: 'operation', value: 'storage_upload' },
-      mode: 'advanced',
       required: true,
     },
     {
@@ -4,9 +4,9 @@ import { createLogger } from '@sim/logger'
 import { eq } from 'drizzle-orm'
 import { refreshTokenIfNeeded } from '@/app/api/auth/oauth/utils'
 import type { BlockOutput } from '@/blocks/types'
-import { BlockType, DEFAULTS, EVALUATOR } from '@/executor/constants'
+import { BlockType, DEFAULTS, EVALUATOR, HTTP } from '@/executor/constants'
 import type { BlockHandler, ExecutionContext } from '@/executor/types'
-import { buildAPIUrl, buildAuthHeaders, extractAPIErrorMessage } from '@/executor/utils/http'
+import { buildAPIUrl, extractAPIErrorMessage } from '@/executor/utils/http'
 import { isJSONString, parseJSON, stringifyJSON } from '@/executor/utils/json'
 import { validateModelProvider } from '@/executor/utils/permission-check'
 import { calculateCost, getProviderFromModel } from '@/providers/utils'
@@ -143,7 +143,9 @@ export class EvaluatorBlockHandler implements BlockHandler {
 
     const response = await fetch(url.toString(), {
       method: 'POST',
-      headers: await buildAuthHeaders(),
+      headers: {
+        'Content-Type': HTTP.CONTENT_TYPE.JSON,
+      },
       body: stringifyJSON(providerRequest),
     })
 
@@ -9,12 +9,12 @@ import type { BlockOutput } from '@/blocks/types'
 import {
   BlockType,
   DEFAULTS,
+  HTTP,
   isAgentBlockType,
   isRouterV2BlockType,
   ROUTER,
 } from '@/executor/constants'
 import type { BlockHandler, ExecutionContext } from '@/executor/types'
-import { buildAuthHeaders } from '@/executor/utils/http'
 import { validateModelProvider } from '@/executor/utils/permission-check'
 import { calculateCost, getProviderFromModel } from '@/providers/utils'
 import type { SerializedBlock } from '@/serializer/types'
@@ -118,7 +118,9 @@ export class RouterBlockHandler implements BlockHandler {
 
     const response = await fetch(url.toString(), {
       method: 'POST',
-      headers: await buildAuthHeaders(),
+      headers: {
+        'Content-Type': HTTP.CONTENT_TYPE.JSON,
+      },
       body: JSON.stringify(providerRequest),
     })
 
@@ -275,7 +277,9 @@
 
     const response = await fetch(url.toString(), {
       method: 'POST',
-      headers: await buildAuthHeaders(),
+      headers: {
+        'Content-Type': HTTP.CONTENT_TYPE.JSON,
+      },
       body: JSON.stringify(providerRequest),
     })
 
@@ -680,10 +680,6 @@ export function useCollaborativeWorkflow() {
       previousPositions?: Map<string, { x: number; y: number; parentId?: string }>
     }
    ) => {
-      if (isBaselineDiffView) {
-        return
-      }
-
      if (!isInActiveRoom()) {
        logger.debug('Skipping batch position update - not in active workflow')
        return
@@ -729,7 +725,7 @@
        }
      }
    },
-    [isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
+    [addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
  )
 
  const collaborativeUpdateBlockName = useCallback(
@@ -821,10 +817,6 @@
 
  const collaborativeBatchToggleBlockEnabled = useCallback(
    (ids: string[]) => {
-      if (isBaselineDiffView) {
-        return
-      }
-
      if (ids.length === 0) return
 
      const previousStates: Record<string, boolean> = {}
@@ -857,7 +849,7 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
undoRedo.recordBatchToggleEnabled(validIds, previousStates)
|
undoRedo.recordBatchToggleEnabled(validIds, previousStates)
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
|
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeBatchUpdateParent = useCallback(
|
const collaborativeBatchUpdateParent = useCallback(
|
||||||
@@ -869,10 +861,6 @@ export function useCollaborativeWorkflow() {
|
|||||||
affectedEdges: Edge[]
|
affectedEdges: Edge[]
|
||||||
}>
|
}>
|
||||||
) => {
|
) => {
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isInActiveRoom()) {
|
if (!isInActiveRoom()) {
|
||||||
logger.debug('Skipping batch update parent - not in active workflow')
|
logger.debug('Skipping batch update parent - not in active workflow')
|
||||||
return
|
return
|
||||||
@@ -943,7 +931,7 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
logger.debug('Batch updated parent for blocks', { updateCount: updates.length })
|
logger.debug('Batch updated parent for blocks', { updateCount: updates.length })
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
|
[isInActiveRoom, undoRedo, addToQueue, activeWorkflowId, session?.user?.id]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeToggleBlockAdvancedMode = useCallback(
|
const collaborativeToggleBlockAdvancedMode = useCallback(
|
||||||
@@ -963,37 +951,18 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
const collaborativeSetBlockCanonicalMode = useCallback(
|
const collaborativeSetBlockCanonicalMode = useCallback(
|
||||||
(id: string, canonicalId: string, canonicalMode: 'basic' | 'advanced') => {
|
(id: string, canonicalId: string, canonicalMode: 'basic' | 'advanced') => {
|
||||||
if (isBaselineDiffView) {
|
executeQueuedOperation(
|
||||||
return
|
BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
|
||||||
}
|
OPERATION_TARGETS.BLOCK,
|
||||||
|
{ id, canonicalId, canonicalMode },
|
||||||
useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
|
() => useWorkflowStore.getState().setBlockCanonicalMode(id, canonicalId, canonicalMode)
|
||||||
|
)
|
||||||
if (!activeWorkflowId) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
const operationId = crypto.randomUUID()
|
|
||||||
addToQueue({
|
|
||||||
id: operationId,
|
|
||||||
operation: {
|
|
||||||
operation: BLOCK_OPERATIONS.UPDATE_CANONICAL_MODE,
|
|
||||||
target: OPERATION_TARGETS.BLOCK,
|
|
||||||
payload: { id, canonicalId, canonicalMode },
|
|
||||||
},
|
|
||||||
workflowId: activeWorkflowId,
|
|
||||||
userId: session?.user?.id || 'unknown',
|
|
||||||
})
|
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, activeWorkflowId, addToQueue, session?.user?.id]
|
[executeQueuedOperation]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeBatchToggleBlockHandles = useCallback(
|
const collaborativeBatchToggleBlockHandles = useCallback(
|
||||||
(ids: string[]) => {
|
(ids: string[]) => {
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (ids.length === 0) return
|
if (ids.length === 0) return
|
||||||
|
|
||||||
const previousStates: Record<string, boolean> = {}
|
const previousStates: Record<string, boolean> = {}
|
||||||
@@ -1026,15 +995,11 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
undoRedo.recordBatchToggleHandles(validIds, previousStates)
|
undoRedo.recordBatchToggleHandles(validIds, previousStates)
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
|
[addToQueue, activeWorkflowId, session?.user?.id, undoRedo]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeBatchAddEdges = useCallback(
|
const collaborativeBatchAddEdges = useCallback(
|
||||||
(edges: Edge[], options?: { skipUndoRedo?: boolean }) => {
|
(edges: Edge[], options?: { skipUndoRedo?: boolean }) => {
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isInActiveRoom()) {
|
if (!isInActiveRoom()) {
|
||||||
logger.debug('Skipping batch add edges - not in active workflow')
|
logger.debug('Skipping batch add edges - not in active workflow')
|
||||||
return false
|
return false
|
||||||
@@ -1070,15 +1035,11 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
return true
|
return true
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
|
[addToQueue, activeWorkflowId, session?.user?.id, isInActiveRoom, undoRedo]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeBatchRemoveEdges = useCallback(
|
const collaborativeBatchRemoveEdges = useCallback(
|
||||||
(edgeIds: string[], options?: { skipUndoRedo?: boolean }) => {
|
(edgeIds: string[], options?: { skipUndoRedo?: boolean }) => {
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isInActiveRoom()) {
|
if (!isInActiveRoom()) {
|
||||||
logger.debug('Skipping batch remove edges - not in active workflow')
|
logger.debug('Skipping batch remove edges - not in active workflow')
|
||||||
return false
|
return false
|
||||||
@@ -1128,7 +1089,7 @@ export function useCollaborativeWorkflow() {
|
|||||||
logger.info('Batch removed edges', { count: validEdgeIds.length })
|
logger.info('Batch removed edges', { count: validEdgeIds.length })
|
||||||
return true
|
return true
|
||||||
},
|
},
|
||||||
[isBaselineDiffView, isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
|
[isInActiveRoom, addToQueue, activeWorkflowId, session, undoRedo]
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeSetSubblockValue = useCallback(
|
const collaborativeSetSubblockValue = useCallback(
|
||||||
@@ -1204,10 +1165,6 @@ export function useCollaborativeWorkflow() {
|
|||||||
(blockId: string, subblockId: string, value: any) => {
|
(blockId: string, subblockId: string, value: any) => {
|
||||||
if (isApplyingRemoteChange.current) return
|
if (isApplyingRemoteChange.current) return
|
||||||
|
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isInActiveRoom()) {
|
if (!isInActiveRoom()) {
|
||||||
logger.debug('Skipping tag selection - not in active workflow', {
|
logger.debug('Skipping tag selection - not in active workflow', {
|
||||||
currentWorkflowId,
|
currentWorkflowId,
|
||||||
@@ -1235,14 +1192,7 @@ export function useCollaborativeWorkflow() {
|
|||||||
userId: session?.user?.id || 'unknown',
|
userId: session?.user?.id || 'unknown',
|
||||||
})
|
})
|
||||||
},
|
},
|
||||||
[
|
[addToQueue, currentWorkflowId, activeWorkflowId, session?.user?.id, isInActiveRoom]
|
||||||
isBaselineDiffView,
|
|
||||||
addToQueue,
|
|
||||||
currentWorkflowId,
|
|
||||||
activeWorkflowId,
|
|
||||||
session?.user?.id,
|
|
||||||
isInActiveRoom,
|
|
||||||
]
|
|
||||||
)
|
)
|
||||||
|
|
||||||
const collaborativeUpdateLoopType = useCallback(
|
const collaborativeUpdateLoopType = useCallback(
|
||||||
@@ -1588,10 +1538,6 @@ export function useCollaborativeWorkflow() {
|
|||||||
|
|
||||||
const collaborativeBatchRemoveBlocks = useCallback(
|
const collaborativeBatchRemoveBlocks = useCallback(
|
||||||
(blockIds: string[], options?: { skipUndoRedo?: boolean }) => {
|
(blockIds: string[], options?: { skipUndoRedo?: boolean }) => {
|
||||||
if (isBaselineDiffView) {
|
|
||||||
return false
|
|
||||||
}
|
|
||||||
|
|
||||||
if (!isInActiveRoom()) {
|
if (!isInActiveRoom()) {
|
||||||
logger.debug('Skipping batch remove blocks - not in active workflow')
|
logger.debug('Skipping batch remove blocks - not in active workflow')
|
||||||
return false
|
return false
|
||||||
@@ -1673,7 +1619,6 @@ export function useCollaborativeWorkflow() {
|
|||||||
return true
|
return true
|
||||||
},
|
},
|
||||||
[
|
[
|
||||||
isBaselineDiffView,
|
|
||||||
addToQueue,
|
addToQueue,
|
||||||
activeWorkflowId,
|
activeWorkflowId,
|
||||||
session?.user?.id,
|
session?.user?.id,
|
||||||
|
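The recurring deletion above strips the `isBaselineDiffView` early-return guards from these collaborative callbacks, and `collaborativeSetBlockCanonicalMode` now routes through an `executeQueuedOperation` helper whose definition sits outside this diff. A hypothetical sketch inferred from the call site; every name and type here is an assumption, not the repo's actual implementation:

```ts
// Hypothetical sketch of executeQueuedOperation, inferred from its call site above.
type QueuedOperation = {
  id: string
  operation: { operation: string; target: string; payload: unknown }
  workflowId: string
  userId: string
}

function makeExecuteQueuedOperation(deps: {
  activeWorkflowId: string | null
  userId?: string
  addToQueue: (op: QueuedOperation) => void
}) {
  return (operation: string, target: string, payload: unknown, applyLocal: () => void) => {
    applyLocal() // optimistic local update before queueing the collaborative op
    if (!deps.activeWorkflowId) return
    deps.addToQueue({
      id: crypto.randomUUID(),
      operation: { operation, target, payload },
      workflowId: deps.activeWorkflowId,
      userId: deps.userId ?? 'unknown',
    })
  }
}
```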
@@ -11,10 +11,9 @@ import { extractAndPersistCustomTools } from '@/lib/workflows/persistence/custom
 import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/persistence/utils'
 import { isValidKey } from '@/lib/workflows/sanitization/key-validation'
 import { validateWorkflowState } from '@/lib/workflows/sanitization/validation'
-import { buildCanonicalIndex, isCanonicalPair } from '@/lib/workflows/subblocks/visibility'
 import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
 import { getAllBlocks, getBlock } from '@/blocks/registry'
-import type { BlockConfig, SubBlockConfig } from '@/blocks/types'
+import type { SubBlockConfig } from '@/blocks/types'
 import { EDGE, normalizeName, RESERVED_BLOCK_NAMES } from '@/executor/constants'
 import { getUserPermissionConfig } from '@/executor/utils/permission-check'
 import { generateLoopBlocks, generateParallelBlocks } from '@/stores/workflows/workflow/utils'
@@ -668,47 +667,11 @@ function createBlockFromParams(
       }
     }
   })
-
-  if (validatedInputs) {
-    updateCanonicalModesForInputs(blockState, Object.keys(validatedInputs), blockConfig)
-  }
 }

 return blockState
 }

-function updateCanonicalModesForInputs(
-  block: { data?: { canonicalModes?: Record<string, 'basic' | 'advanced'> } },
-  inputKeys: string[],
-  blockConfig: BlockConfig
-): void {
-  if (!blockConfig.subBlocks?.length) return
-
-  const canonicalIndex = buildCanonicalIndex(blockConfig.subBlocks)
-  const canonicalModeUpdates: Record<string, 'basic' | 'advanced'> = {}
-
-  for (const inputKey of inputKeys) {
-    const canonicalId = canonicalIndex.canonicalIdBySubBlockId[inputKey]
-    if (!canonicalId) continue
-
-    const group = canonicalIndex.groupsById[canonicalId]
-    if (!group || !isCanonicalPair(group)) continue
-
-    const isAdvanced = group.advancedIds.includes(inputKey)
-    const existingMode = canonicalModeUpdates[canonicalId]
-
-    if (!existingMode || isAdvanced) {
-      canonicalModeUpdates[canonicalId] = isAdvanced ? 'advanced' : 'basic'
-    }
-  }
-
-  if (Object.keys(canonicalModeUpdates).length > 0) {
-    if (!block.data) block.data = {}
-    if (!block.data.canonicalModes) block.data.canonicalModes = {}
-    Object.assign(block.data.canonicalModes, canonicalModeUpdates)
-  }
-}
-
 /**
  * Normalize tools array by adding back fields that were sanitized for training
  */
@@ -1691,15 +1654,6 @@ function applyOperationsToWorkflowState(
           block.data.collection = params.inputs.collection
         }
       }
-
-      const editBlockConfig = getBlock(block.type)
-      if (editBlockConfig) {
-        updateCanonicalModesForInputs(
-          block,
-          Object.keys(validationResult.validInputs),
-          editBlockConfig
-        )
-      }
     }

     // Update basic properties
@@ -2302,15 +2256,6 @@ function applyOperationsToWorkflowState(
           existingBlock.subBlocks[key].value = sanitizedValue
         }
       })
-
-      const existingBlockConfig = getBlock(existingBlock.type)
-      if (existingBlockConfig) {
-        updateCanonicalModesForInputs(
-          existingBlock,
-          Object.keys(validationResult.validInputs),
-          existingBlockConfig
-        )
-      }
     }
   } else {
     // Special container types (loop, parallel) are not in the block registry but are valid
@@ -24,7 +24,7 @@
   },
   "dependencies": {
     "@a2a-js/sdk": "0.3.7",
-    "@anthropic-ai/sdk": "0.71.2",
+    "@anthropic-ai/sdk": "^0.39.0",
     "@aws-sdk/client-bedrock-runtime": "3.940.0",
    "@aws-sdk/client-dynamodb": "3.940.0",
    "@aws-sdk/client-rds-data": "3.940.0",
@@ -1,5 +1,4 @@
 import Anthropic from '@anthropic-ai/sdk'
-import { transformJSONSchema } from '@anthropic-ai/sdk/lib/transform-json-schema'
 import { createLogger } from '@sim/logger'
 import type { StreamingExecution } from '@/executor/types'
 import { MAX_TOOL_ITERATIONS } from '@/providers'
@@ -186,10 +185,13 @@ export const anthropicProvider: ProviderConfig = {
       const schema = request.responseFormat.schema || request.responseFormat

       if (useNativeStructuredOutputs) {
-        const transformedSchema = transformJSONSchema(schema)
+        const schemaWithConstraints = {
+          ...schema,
+          additionalProperties: false,
+        }
         payload.output_format = {
           type: 'json_schema',
-          schema: transformedSchema,
+          schema: schemaWithConstraints,
         }
         logger.info(`Using native structured outputs for model: ${modelId}`)
       } else {
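Instead of the SDK's `transformJSONSchema` helper, the provider now closes the response schema itself by forcing `additionalProperties: false` before handing it to native structured outputs. A minimal sketch of that merge (the sample schema is illustrative only):

```ts
// Sketch of the schema merge above. Spreading the caller's schema first and then
// overriding ensures additionalProperties is always false in the final payload,
// even if the incoming schema set it to true.
function withClosedProperties(schema: Record<string, unknown>): Record<string, unknown> {
  return { ...schema, additionalProperties: false }
}

// Illustrative usage; any JSON Schema object works the same way.
const outputFormat = {
  type: 'json_schema',
  schema: withClosedProperties({
    type: 'object',
    properties: { answer: { type: 'string' } },
  }),
}
```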
@@ -27,9 +27,6 @@ export function registerEmitFunctions(
   emitSubblockUpdate = subblockEmit
   emitVariableUpdate = variableEmit
   currentRegisteredWorkflowId = workflowId
-  if (workflowId) {
-    useOperationQueueStore.getState().processNextOperation()
-  }
 }

 let currentRegisteredWorkflowId: string | null = null
@@ -265,14 +262,16 @@ export const useOperationQueueStore = create<OperationQueueState>((set, get) =>
       return
     }

-    if (!currentRegisteredWorkflowId) {
+    const nextOperation = currentRegisteredWorkflowId
+      ? state.operations.find(
+          (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
+        )
+      : state.operations.find((op) => op.status === 'pending')
+    if (!nextOperation) {
       return
     }

-    const nextOperation = state.operations.find(
-      (op) => op.status === 'pending' && op.workflowId === currentRegisteredWorkflowId
-    )
-    if (!nextOperation) {
+    if (currentRegisteredWorkflowId && nextOperation.workflowId !== currentRegisteredWorkflowId) {
      return
     }

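The queue change makes operation processing usable before a workflow is registered: it prefers a pending operation scoped to the registered workflow, falls back to any pending operation, and bails out on a workflow mismatch. A condensed sketch of that selection logic; the type names here are assumptions:

```ts
// Sketch of the next-operation selection introduced above (types are assumptions).
interface QueueOp {
  status: 'pending' | 'processing' | 'confirmed'
  workflowId: string
}

function pickNextOperation(
  operations: QueueOp[],
  registeredWorkflowId: string | null
): QueueOp | undefined {
  // Prefer operations for the registered workflow; otherwise take any pending one.
  const next = registeredWorkflowId
    ? operations.find((op) => op.status === 'pending' && op.workflowId === registeredWorkflowId)
    : operations.find((op) => op.status === 'pending')
  if (!next) return undefined
  // Mirrors the guard in the diff: skip if the found operation belongs elsewhere.
  if (registeredWorkflowId && next.workflowId !== registeredWorkflowId) return undefined
  return next
}
```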
@@ -38,12 +38,11 @@ export const storageUploadTool: ToolConfig<
       visibility: 'user-or-llm',
       description: 'Optional folder path (e.g., "folder/subfolder/")',
     },
-    fileData: {
-      type: 'json',
+    fileContent: {
+      type: 'string',
       required: true,
       visibility: 'user-or-llm',
-      description:
-        'File to upload - UserFile object (basic mode) or string content (advanced mode: base64 or plain text). Supports data URLs.',
+      description: 'The file content (base64 encoded for binary files, or plain text)',
     },
     contentType: {
       type: 'string',
@@ -66,28 +65,65 @@ export const storageUploadTool: ToolConfig<
     },

   request: {
-    url: '/api/tools/supabase/storage-upload',
+    url: (params) => {
+      // Combine folder path and fileName, ensuring proper formatting
+      let fullPath = params.fileName
+      if (params.path) {
+        // Ensure path ends with / and doesn't have double slashes
+        const folderPath = params.path.endsWith('/') ? params.path : `${params.path}/`
+        fullPath = `${folderPath}${params.fileName}`
+      }
+      return `https://${params.projectId}.supabase.co/storage/v1/object/${params.bucket}/${fullPath}`
+    },
     method: 'POST',
-    headers: () => ({
-      'Content-Type': 'application/json',
-    }),
-    body: (params) => ({
-      projectId: params.projectId,
-      apiKey: params.apiKey,
-      bucket: params.bucket,
-      fileName: params.fileName,
-      path: params.path,
-      fileData: params.fileData,
-      contentType: params.contentType,
-      upsert: params.upsert,
-    }),
+    headers: (params) => {
+      const headers: Record<string, string> = {
+        apikey: params.apiKey,
+        Authorization: `Bearer ${params.apiKey}`,
+      }
+
+      if (params.contentType) {
+        headers['Content-Type'] = params.contentType
+      }
+
+      if (params.upsert) {
+        headers['x-upsert'] = 'true'
+      }
+
+      return headers
+    },
+    body: (params) => {
+      // Return the file content wrapped in an object
+      // The actual upload will need to handle this appropriately
+      return {
+        content: params.fileContent,
+      }
+    },
+  },
+
+  transformResponse: async (response: Response) => {
+    let data
+    try {
+      data = await response.json()
+    } catch (parseError) {
+      throw new Error(`Failed to parse Supabase storage upload response: ${parseError}`)
+    }
+
+    return {
+      success: true,
+      output: {
+        message: 'Successfully uploaded file to storage',
+        results: data,
+      },
+      error: undefined,
+    }
   },

   outputs: {
     message: { type: 'string', description: 'Operation status message' },
     results: {
       type: 'object',
-      description: 'Upload result including file path, bucket, and public URL',
+      description: 'Upload result including file path and metadata',
     },
   },
 }
@@ -136,7 +136,7 @@ export interface SupabaseStorageUploadParams {
   bucket: string
   fileName: string
   path?: string
-  fileData: any // UserFile object (basic mode) or string (advanced mode: base64/plain text)
+  fileContent: string
   contentType?: string
   upsert?: boolean
 }
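After this change the tool builds a direct request to Supabase Storage's `POST /storage/v1/object/{bucket}/{path}` endpoint instead of proxying through an internal API route. A standalone sketch of the equivalent call; note that this sketch posts the content as the raw request body, whereas the tool as diffed wraps it in a JSON object and leaves final handling to the executor. Project ID and key are placeholders:

```ts
// Sketch: direct upload to Supabase Storage, mirroring the request builder above.
async function uploadToSupabaseStorage(opts: {
  projectId: string
  apiKey: string
  bucket: string
  fileName: string
  path?: string          // optional folder prefix, e.g. 'folder/subfolder/'
  content: string | Blob // base64 or plain text per the tool's fileContent param
  contentType?: string
  upsert?: boolean
}): Promise<unknown> {
  const folder = opts.path ? (opts.path.endsWith('/') ? opts.path : `${opts.path}/`) : ''
  const url = `https://${opts.projectId}.supabase.co/storage/v1/object/${opts.bucket}/${folder}${opts.fileName}`
  const headers: Record<string, string> = {
    apikey: opts.apiKey,
    Authorization: `Bearer ${opts.apiKey}`,
  }
  if (opts.contentType) headers['Content-Type'] = opts.contentType
  if (opts.upsert) headers['x-upsert'] = 'true'
  const response = await fetch(url, { method: 'POST', headers, body: opts.content })
  if (!response.ok) throw new Error(`Storage upload failed: ${response.status}`)
  return response.json()
}
```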
37 bun.lock
@@ -1,5 +1,6 @@
 {
   "lockfileVersion": 1,
+  "configVersion": 0,
   "workspaces": {
     "": {
       "name": "simstudio",
@@ -54,7 +55,7 @@
       "version": "0.1.0",
       "dependencies": {
         "@a2a-js/sdk": "0.3.7",
-        "@anthropic-ai/sdk": "0.71.2",
+        "@anthropic-ai/sdk": "^0.39.0",
         "@aws-sdk/client-bedrock-runtime": "3.940.0",
         "@aws-sdk/client-dynamodb": "3.940.0",
         "@aws-sdk/client-rds-data": "3.940.0",
@@ -362,7 +363,7 @@

     "@ampproject/remapping": ["@ampproject/remapping@2.3.0", "", { "dependencies": { "@jridgewell/gen-mapping": "^0.3.5", "@jridgewell/trace-mapping": "^0.3.24" } }, "sha512-30iZtAPgz+LTIYoeivqYo853f02jBYSd5uGnGpkFV0M3xOt9aN73erkgYAmZU43x4VfqcnLxW9Kpg3R5LC4YYw=="],

-    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.71.2", "", { "dependencies": { "json-schema-to-ts": "^3.1.1" }, "peerDependencies": { "zod": "^3.25.0 || ^4.0.0" }, "optionalPeers": ["zod"], "bin": { "anthropic-ai-sdk": "bin/cli" } }, "sha512-TGNDEUuEstk/DKu0/TflXAEt+p+p/WhTlFzEnoosvbaDU2LTjm42igSdlL0VijrKpWejtOKxX0b8A7uc+XiSAQ=="],
+    "@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.39.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" } }, "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg=="],

     "@ark/schema": ["@ark/schema@0.56.0", "", { "dependencies": { "@ark/util": "0.56.0" } }, "sha512-ECg3hox/6Z/nLajxXqNhgPtNdHWC9zNsDyskwO28WinoFEnWow4IsERNz9AnXRhTZJnYIlAJ4uGn3nlLk65vZA=="],
@@ -546,8 +547,6 @@

     "@babel/plugin-transform-react-jsx-source": ["@babel/plugin-transform-react-jsx-source@7.27.1", "", { "dependencies": { "@babel/helper-plugin-utils": "^7.27.1" }, "peerDependencies": { "@babel/core": "^7.0.0-0" } }, "sha512-zbwoTsBruTeKB9hSq73ha66iFeJHuaFkUbwvqElnygoNbj/jHRsSeokowZFN3CZ64IvEqcmmkVe89OPXc7ldAw=="],

-    "@babel/runtime": ["@babel/runtime@7.28.6", "", {}, "sha512-05WQkdpL9COIMz4LjTxGpPNCdlpyimKppYNoJ5Di5EUObifl8t4tuLuUBBZEpoLYOmfvIWrsp9fCl0HoPRVTdA=="],
-
     "@babel/template": ["@babel/template@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/parser": "^7.28.6", "@babel/types": "^7.28.6" } }, "sha512-YA6Ma2KsCdGb+WC6UpBVFJGXL58MDA6oyONbjyF/+5sBgxY/dwkhLogbMT2GXXyU84/IhRw/2D1Os1B/giz+BQ=="],

     "@babel/traverse": ["@babel/traverse@7.28.6", "", { "dependencies": { "@babel/code-frame": "^7.28.6", "@babel/generator": "^7.28.6", "@babel/helper-globals": "^7.28.0", "@babel/parser": "^7.28.6", "@babel/template": "^7.28.6", "@babel/types": "^7.28.6", "debug": "^4.3.1" } }, "sha512-fgWX62k02qtjqdSNTAGxmKYY/7FSL9WAS1o2Hu5+I5m9T0yxZzr4cnrfXQ/MX0rIifthCSs6FKTlzYbJcPtMNg=="],
@@ -2444,8 +2443,6 @@

     "json-schema": ["json-schema@0.4.0", "", {}, "sha512-es94M3nTIfsEPisRafak+HDLfHXnKBhV3vU5eqPcS3flIWqcxJWgXHXiey3YrpaNsanY5ei1VoYEbOzijuq9BA=="],

-    "json-schema-to-ts": ["json-schema-to-ts@3.1.1", "", { "dependencies": { "@babel/runtime": "^7.18.3", "ts-algebra": "^2.0.0" } }, "sha512-+DWg8jCJG2TEnpy7kOm/7/AxaYoaRbjVB4LFZLySZlWn8exGs3A4OLJR966cVvU26N7X9TWxl+Jsw7dzAqKT6g=="],
-
     "json-schema-traverse": ["json-schema-traverse@0.4.1", "", {}, "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg=="],

     "json5": ["json5@2.2.3", "", { "bin": { "json5": "lib/cli.js" } }, "sha512-XmOWe7eyHYH14cLdVPoyg+GOH3rYX++KpzrylJwSW98t3Nk+U8XOl8FWKOgwtzdb8lXGf6zYwDUzeHMWfxasyg=="],
@@ -3390,8 +3387,6 @@

     "trough": ["trough@2.2.0", "", {}, "sha512-tmMpK00BjZiUyVyvrBK7knerNgmgvcV/KLVyuma/SC+TQN167GrMRciANTz09+k3zW8L8t60jWO1GpfkZdjTaw=="],

-    "ts-algebra": ["ts-algebra@2.0.0", "", {}, "sha512-FPAhNPFMrkwz76P7cdjdmiShwMynZYN6SgOujD1urY4oNm80Ou9oMdmbR45LotcKOXoy7wSmHkRFE6Mxbrhefw=="],
-
     "ts-interface-checker": ["ts-interface-checker@0.1.13", "", {}, "sha512-Y/arvbn+rrz3JCKl9C4kVNfTfSm2/mEp5FSz5EsZSANGPSlQrpRI5M4PKF+mJnE52jOO90PnPSc3Ur3bTQw0gA=="],

     "tsafe": ["tsafe@1.8.12", "", {}, "sha512-nFRqW0ttu/2o6XTXsHiVZWJBCOaxhVqZLg7dgs3coZNsCMPXPfwz+zPHAQA+70fNnVJLAPg1EgGIqK9Q84tvAw=="],
@@ -3598,6 +3593,10 @@

     "zwitch": ["zwitch@2.0.4", "", {}, "sha512-bXE4cR/kVZhKZX/RjPEflHaKVhUVl85noU3v6b8apfQEc1x4A+zBxjZ4lN8LqGd6WZ3dl98pY4o717VFmoPp+A=="],

+    "@anthropic-ai/sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],
+
+    "@anthropic-ai/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
+
     "@asamuzakjp/css-color/lru-cache": ["lru-cache@10.4.3", "", {}, "sha512-JNAzZcXrCt42VGLuYz0zfAzDfAvJWW6AfYlDBQyDV5DClI2m5sAmK+OIO7s59XfsRsWHp02jAJrRadPRGTt6SQ=="],

     "@aws-crypto/crc32/@aws-sdk/types": ["@aws-sdk/types@3.969.0", "", { "dependencies": { "@smithy/types": "^4.12.0", "tslib": "^2.6.2" } }, "sha512-7IIzM5TdiXn+VtgPdVLjmE6uUBUtnga0f4RiSEI1WW10RPuNvZ9U+pL3SwDiRDAdoGrOF9tSLJOFZmfuwYuVYQ=="],
@@ -3714,8 +3713,6 @@

     "@browserbasehq/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],

-    "@browserbasehq/stagehand/@anthropic-ai/sdk": ["@anthropic-ai/sdk@0.39.0", "", { "dependencies": { "@types/node": "^18.11.18", "@types/node-fetch": "^2.6.4", "abort-controller": "^3.0.0", "agentkeepalive": "^4.2.1", "form-data-encoder": "1.7.2", "formdata-node": "^4.3.2", "node-fetch": "^2.6.7" } }, "sha512-eMyDIPRZbt1CCLErRCi3exlAvNkBtRe+kW5vvJyef93PmNr/clstYgHhtvmkxN82nlKgzyGPCyGxrm0JQ1ZIdg=="],
-
     "@cerebras/cerebras_cloud_sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],

     "@cerebras/cerebras_cloud_sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
@@ -4218,6 +4215,10 @@

     "xml2js/xmlbuilder": ["xmlbuilder@11.0.1", "", {}, "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA=="],

+    "@anthropic-ai/sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
+
+    "@anthropic-ai/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
+
     "@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],

     "@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from": ["@smithy/util-buffer-from@2.2.0", "", { "dependencies": { "@smithy/is-array-buffer": "^2.2.0", "tslib": "^2.6.2" } }, "sha512-IJdWBbTcMQ6DA0gdNhh/BwrLkDR+ADW5Kr1aZmd4k3DIF6ezMV4R2NIAmT08wQJ3yUK82thHWmC/TnK/wpMMIA=="],
@@ -4274,10 +4275,6 @@

     "@browserbasehq/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],

-    "@browserbasehq/stagehand/@anthropic-ai/sdk/@types/node": ["@types/node@18.19.130", "", { "dependencies": { "undici-types": "~5.26.4" } }, "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg=="],
-
-    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch": ["node-fetch@2.7.0", "", { "dependencies": { "whatwg-url": "^5.0.0" }, "peerDependencies": { "encoding": "^0.1.0" }, "optionalPeers": ["encoding"] }, "sha512-c4FRfUm/dbcWZ7U+1Wq0AwCyFL+3nt2bEw05wfxSz+DWpWsitgmSgYmy2dQdWyKC1694ELPqMs/YzUSNozLt8A=="],
-
     "@cerebras/cerebras_cloud_sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],

     "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
@@ -4688,6 +4685,10 @@

     "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.2", "", { "os": "win32", "cpu": "x64" }, "sha512-sRdU18mcKf7F+YgheI/zGf5alZatMUTKj/jNS6l744f9u3WFu4v7twcUI9vu4mknF4Y9aDlblIie0IM+5xxaqQ=="],

+    "@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
+
+    "@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
+
     "@aws-crypto/sha1-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],

     "@aws-crypto/sha256-browser/@smithy/util-utf8/@smithy/util-buffer-from/@smithy/is-array-buffer": ["@smithy/is-array-buffer@2.2.0", "", { "dependencies": { "tslib": "^2.6.2" } }, "sha512-GGP3O9QFD24uGeAXYUjwSTXARoqpZykHadOmA8G5vfJPK0/DC67qa//0qvqrJzL1xc8WQWX7/yc7fwudjPHPhA=="],
@@ -4736,10 +4737,6 @@

     "@browserbasehq/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],

-    "@browserbasehq/stagehand/@anthropic-ai/sdk/@types/node/undici-types": ["undici-types@5.26.5", "", {}, "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA=="],
-
-    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url": ["whatwg-url@5.0.0", "", { "dependencies": { "tr46": "~0.0.3", "webidl-conversions": "^3.0.0" } }, "sha512-saE57nupxk6v3HY35+jzBwYa0rKSy0XR8JSxZPwgLr7ys0IBzhGviA1/TUGJLmSVqs8pb9AnvICXEuOHLprYTw=="],
-
     "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],

     "@cerebras/cerebras_cloud_sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
@@ -4832,10 +4829,6 @@

     "@aws-sdk/client-sqs/@aws-sdk/credential-provider-node/@aws-sdk/credential-provider-sso/@aws-sdk/token-providers/@aws-sdk/nested-clients": ["@aws-sdk/nested-clients@3.947.0", "", { "dependencies": { "@aws-crypto/sha256-browser": "5.2.0", "@aws-crypto/sha256-js": "5.2.0", "@aws-sdk/core": "3.947.0", "@aws-sdk/middleware-host-header": "3.936.0", "@aws-sdk/middleware-logger": "3.936.0", "@aws-sdk/middleware-recursion-detection": "3.936.0", "@aws-sdk/middleware-user-agent": "3.947.0", "@aws-sdk/region-config-resolver": "3.936.0", "@aws-sdk/types": "3.936.0", "@aws-sdk/util-endpoints": "3.936.0", "@aws-sdk/util-user-agent-browser": "3.936.0", "@aws-sdk/util-user-agent-node": "3.947.0", "@smithy/config-resolver": "^4.4.3", "@smithy/core": "^3.18.7", "@smithy/fetch-http-handler": "^5.3.6", "@smithy/hash-node": "^4.2.5", "@smithy/invalid-dependency": "^4.2.5", "@smithy/middleware-content-length": "^4.2.5", "@smithy/middleware-endpoint": "^4.3.14", "@smithy/middleware-retry": "^4.4.14", "@smithy/middleware-serde": "^4.2.6", "@smithy/middleware-stack": "^4.2.5", "@smithy/node-config-provider": "^4.3.5", "@smithy/node-http-handler": "^4.4.5", "@smithy/protocol-http": "^5.3.5", "@smithy/smithy-client": "^4.9.10", "@smithy/types": "^4.9.0", "@smithy/url-parser": "^4.2.5", "@smithy/util-base64": "^4.3.0", "@smithy/util-body-length-browser": "^4.2.0", "@smithy/util-body-length-node": "^4.2.1", "@smithy/util-defaults-mode-browser": "^4.3.13", "@smithy/util-defaults-mode-node": "^4.2.16", "@smithy/util-endpoints": "^3.2.5", "@smithy/util-middleware": "^4.2.5", "@smithy/util-retry": "^4.2.5", "@smithy/util-utf8": "^4.2.0", "tslib": "^2.6.2" } }, "sha512-DjRJEYNnHUTu9kGPPQDTSXquwSEd6myKR4ssI4FaYLFhdT3ldWpj73yYt807H3tdmhS7vPmdVqchSJnjurUQAw=="],

-    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/tr46": ["tr46@0.0.3", "", {}, "sha512-N3WMsuqV66lT30CrXNbEjx4GEwlow3v6rr4mCcv6prnfwhS01rkgyFdjPNBYd9br7LpXV1+Emh01fHnq2Gdgrw=="],
-
-    "@browserbasehq/stagehand/@anthropic-ai/sdk/node-fetch/whatwg-url/webidl-conversions": ["webidl-conversions@3.0.1", "", {}, "sha512-2JAn3z8AR6rjK8Sm8orRC0h/bcl/DqL7tRPdGZ4I1CjdF+EaMLmYxBHyXuKL849eucPFhvBoxMsflfOb8kxaeQ=="],
-
     "@trigger.dev/core/socket.io/engine.io/@types/node/undici-types": ["undici-types@7.10.0", "", {}, "sha512-t5Fy/nfn+14LuOc2KNYg75vZqClpAiqscVvMygNnlsHBFpSXdJaYtXMcdNLpl/Qvc3P2cB3s6lOV51nqsFq4ag=="],

     "lint-staged/listr2/cli-truncate/string-width/strip-ansi": ["strip-ansi@7.1.2", "", { "dependencies": { "ansi-regex": "^6.0.1" } }, "sha512-gmBGslpoQJtgnMAvOVqGZpEz9dyoKTCzy2nfz/n8aIFhN/jCE/rCmcxabB6jOOHV+0WNnylOxaxBQPSvcWklhA=="],
5 packages/python-sdk/.gitignore vendored
@@ -81,7 +81,4 @@ Thumbs.db
 # mypy
 .mypy_cache/
 .dmypy.json
 dmypy.json
-
-# uv
-uv.lock
@@ -43,30 +43,24 @@ SimStudioClient(api_key: str, base_url: str = "https://sim.ai")

 #### Methods

-##### execute_workflow(workflow_id, input=None, *, timeout=30.0, stream=None, selected_outputs=None, async_execution=None)
+##### execute_workflow(workflow_id, input_data=None, timeout=30.0)

 Execute a workflow with optional input data.

 ```python
-# With dict input (spread at root level of request body)
-result = client.execute_workflow("workflow-id", {"message": "Hello, world!"})
-
-# With primitive input (wrapped as { input: value })
-result = client.execute_workflow("workflow-id", "NVDA")
-
-# With options (keyword-only arguments)
-result = client.execute_workflow("workflow-id", {"message": "Hello"}, timeout=60.0)
+result = client.execute_workflow(
+    "workflow-id",
+    input_data={"message": "Hello, world!"},
+    timeout=30.0  # 30 seconds
+)
 ```

 **Parameters:**
 - `workflow_id` (str): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow. Dicts are spread at the root level, primitives/lists are wrapped in `{ input: value }`. File objects are automatically converted to base64.
-- `timeout` (float, keyword-only): Timeout in seconds (default: 30.0)
-- `stream` (bool, keyword-only): Enable streaming responses
-- `selected_outputs` (list, keyword-only): Block outputs to stream (e.g., `["agent1.content"]`)
-- `async_execution` (bool, keyword-only): Execute asynchronously and return execution ID
+- `input_data` (dict, optional): Input data to pass to the workflow. File objects are automatically converted to base64.
+- `timeout` (float): Timeout in seconds (default: 30.0)

-**Returns:** `WorkflowExecutionResult` or `AsyncExecutionResult`
+**Returns:** `WorkflowExecutionResult`

 ##### get_workflow_status(workflow_id)
@@ -98,89 +92,24 @@ if is_ready:

 **Returns:** `bool`

-##### execute_workflow_sync(workflow_id, input=None, *, timeout=30.0, stream=None, selected_outputs=None)
+##### execute_workflow_sync(workflow_id, input_data=None, timeout=30.0)

-Execute a workflow synchronously (ensures non-async mode).
+Execute a workflow and poll for completion (useful for long-running workflows).

 ```python
-result = client.execute_workflow_sync("workflow-id", {"data": "some input"}, timeout=60.0)
-```
-
-**Parameters:**
-- `workflow_id` (str): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow
-- `timeout` (float, keyword-only): Timeout in seconds (default: 30.0)
-- `stream` (bool, keyword-only): Enable streaming responses
-- `selected_outputs` (list, keyword-only): Block outputs to stream (e.g., `["agent1.content"]`)
-
-**Returns:** `WorkflowExecutionResult`
-
-##### get_job_status(task_id)
-
-Get the status of an async job.
-
-```python
-status = client.get_job_status("task-id-from-async-execution")
-print("Job status:", status)
-```
-
-**Parameters:**
-- `task_id` (str): The task ID returned from async execution
-
-**Returns:** `dict`
-
-##### execute_with_retry(workflow_id, input=None, *, timeout=30.0, stream=None, selected_outputs=None, async_execution=None, max_retries=3, initial_delay=1.0, max_delay=30.0, backoff_multiplier=2.0)
-
-Execute a workflow with automatic retry on rate limit errors.
-
-```python
-result = client.execute_with_retry(
+result = client.execute_workflow_sync(
     "workflow-id",
-    {"message": "Hello"},
-    timeout=30.0,
-    max_retries=3,
-    initial_delay=1.0,
-    max_delay=30.0,
-    backoff_multiplier=2.0
+    input_data={"data": "some input"},
+    timeout=60.0
 )
 ```

 **Parameters:**
 - `workflow_id` (str): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow
-- `timeout` (float, keyword-only): Timeout in seconds (default: 30.0)
-- `stream` (bool, keyword-only): Enable streaming responses
-- `selected_outputs` (list, keyword-only): Block outputs to stream
-- `async_execution` (bool, keyword-only): Execute asynchronously
-- `max_retries` (int, keyword-only): Maximum retry attempts (default: 3)
-- `initial_delay` (float, keyword-only): Initial delay in seconds (default: 1.0)
-- `max_delay` (float, keyword-only): Maximum delay in seconds (default: 30.0)
-- `backoff_multiplier` (float, keyword-only): Backoff multiplier (default: 2.0)
+- `input_data` (dict, optional): Input data to pass to the workflow
+- `timeout` (float): Timeout for the initial request in seconds

-**Returns:** `WorkflowExecutionResult` or `AsyncExecutionResult`
+**Returns:** `WorkflowExecutionResult`

-##### get_rate_limit_info()
-
-Get current rate limit information from the last API response.
-
-```python
-rate_info = client.get_rate_limit_info()
-if rate_info:
-    print("Remaining requests:", rate_info.remaining)
-```
-
-**Returns:** `RateLimitInfo` or `None`
-
-##### get_usage_limits()
-
-Get current usage limits and quota information.
-
-```python
-limits = client.get_usage_limits()
-print("Current usage:", limits.usage)
-```
-
-**Returns:** `UsageLimits`
-
 ##### set_api_key(api_key)

@@ -242,39 +171,6 @@ class SimStudioError(Exception):
         self.status = status
 ```

-### AsyncExecutionResult
-
-```python
-@dataclass
-class AsyncExecutionResult:
-    success: bool
-    task_id: str
-    status: str  # 'queued'
-    created_at: str
-    links: Dict[str, str]
-```
-
-### RateLimitInfo
-
-```python
-@dataclass
-class RateLimitInfo:
-    limit: int
-    remaining: int
-    reset: int
-    retry_after: Optional[int] = None
-```
-
-### UsageLimits
-
-```python
-@dataclass
-class UsageLimits:
-    success: bool
-    rate_limit: Dict[str, Any]
-    usage: Dict[str, Any]
-```
-
 ## Examples

 ### Basic Workflow Execution
@@ -295,7 +191,7 @@ def run_workflow():
     # Execute the workflow
     result = client.execute_workflow(
         "my-workflow-id",
-        {
+        input_data={
             "message": "Process this data",
             "user_id": "12345"
         }
@@ -402,7 +298,7 @@ client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
 with open('document.pdf', 'rb') as f:
     result = client.execute_workflow(
         'workflow-id',
-        {
+        input_data={
             'documents': [f],  # Must match your workflow's "files" field name
             'instructions': 'Analyze this document'
         }
@@ -412,7 +308,7 @@ with open('document.pdf', 'rb') as f:
 with open('doc1.pdf', 'rb') as f1, open('doc2.pdf', 'rb') as f2:
     result = client.execute_workflow(
         'workflow-id',
-        {
+        input_data={
             'attachments': [f1, f2],  # Must match your workflow's "files" field name
             'query': 'Compare these documents'
         }
@@ -431,14 +327,14 @@ def execute_workflows_batch(workflow_data_pairs):
     """Execute multiple workflows with different input data."""
     results = []

-    for workflow_id, workflow_input in workflow_data_pairs:
+    for workflow_id, input_data in workflow_data_pairs:
         try:
             # Validate workflow before execution
             if not client.validate_workflow(workflow_id):
                 print(f"Skipping {workflow_id}: not deployed")
                 continue

-            result = client.execute_workflow(workflow_id, workflow_input)
+            result = client.execute_workflow(workflow_id, input_data)
             results.append({
                 "workflow_id": workflow_id,
                 "success": result.success,
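The two sides of this README diff document different request-body conventions: the removed version spreads dict inputs at the root of the request body and wraps primitives in an `input` field, while the added version always copies an `input_data` dict. A small sketch contrasting the two body shapes (illustrative only, not SDK code):

```python
# Sketch: the request-body conventions on each side of this diff (not SDK code).

def build_body_removed(input_value=None):
    """'-' side: dicts are spread at the root; primitives/lists are wrapped."""
    if input_value is None:
        return {}
    return input_value.copy() if isinstance(input_value, dict) else {"input": input_value}

def build_body_added(input_data=None):
    """'+' side: input_data is expected to be a dict and is copied as-is."""
    return input_data.copy() if input_data is not None else {}

assert build_body_removed({"message": "hi"}) == {"message": "hi"}
assert build_body_removed("NVDA") == {"input": "NVDA"}
assert build_body_added({"message": "hi"}) == {"message": "hi"}
```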
@@ -4,7 +4,7 @@ build-backend = "setuptools.build_meta"

 [project]
 name = "simstudio-sdk"
-version = "0.1.2"
+version = "0.1.1"
 authors = [
     {name = "Sim", email = "help@sim.ai"},
 ]
|||||||
@@ -13,7 +13,7 @@ import os
|
|||||||
import requests
|
import requests
|
||||||
|
|
||||||
|
|
||||||
__version__ = "0.1.2"
|
__version__ = "0.1.0"
|
||||||
__all__ = [
|
__all__ = [
|
||||||
"SimStudioClient",
|
"SimStudioClient",
|
||||||
"SimStudioError",
|
"SimStudioError",
|
||||||
@@ -64,6 +64,15 @@ class RateLimitInfo:
|
|||||||
retry_after: Optional[int] = None
|
retry_after: Optional[int] = None
|
||||||
|
|
||||||
|
|
||||||
|
@dataclass
|
||||||
|
class RateLimitStatus:
|
||||||
|
"""Rate limit status for sync/async requests."""
|
||||||
|
is_limited: bool
|
||||||
|
limit: int
|
||||||
|
remaining: int
|
||||||
|
reset_at: str
|
||||||
|
|
||||||
|
|
||||||
@dataclass
|
@dataclass
|
||||||
class UsageLimits:
|
class UsageLimits:
|
||||||
"""Usage limits and quota information."""
|
"""Usage limits and quota information."""
|
||||||
@@ -106,6 +115,7 @@ class SimStudioClient:
         Recursively processes nested dicts and lists.
         """
         import base64
+        import io

         # Check if this is a file-like object
         if hasattr(value, 'read') and callable(value.read):

@@ -149,8 +159,7 @@ class SimStudioClient:
     def execute_workflow(
         self,
         workflow_id: str,
-        input: Optional[Any] = None,
-        *,
+        input_data: Optional[Dict[str, Any]] = None,
         timeout: float = 30.0,
         stream: Optional[bool] = None,
         selected_outputs: Optional[list] = None,

@@ -160,13 +169,11 @@ class SimStudioClient:
         Execute a workflow with optional input data.
         If async_execution is True, returns immediately with a task ID.

-        File objects in input will be automatically detected and converted to base64.
+        File objects in input_data will be automatically detected and converted to base64.

         Args:
             workflow_id: The ID of the workflow to execute
-            input: Input data to pass to the workflow. Can be a dict (spread at root level),
-                primitive value (string, number, bool), or list (wrapped in 'input' field).
-                File-like objects within dicts are automatically converted to base64.
+            input_data: Input data to pass to the workflow (can include file-like objects)
             timeout: Timeout in seconds (default: 30.0)
             stream: Enable streaming responses (default: None)
             selected_outputs: Block outputs to stream (e.g., ["agent1.content"])

@@ -186,15 +193,8 @@ class SimStudioClient:
             headers['X-Execution-Mode'] = 'async'

         try:
-            # Build JSON body - spread dict inputs at root level, wrap primitives/lists in 'input' field
-            body = {}
-            if input is not None:
-                if isinstance(input, dict):
-                    # Dict input: spread at root level (matches curl/API behavior)
-                    body = input.copy()
-                else:
-                    # Primitive or list input: wrap in 'input' field
-                    body = {'input': input}
+            # Build JSON body - spread input at root level, then add API control parameters
+            body = input_data.copy() if input_data is not None else {}

             # Convert any file objects in the input to base64 format
             body = self._convert_files_to_base64(body)
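The simplified body construction spreads input_data at the root of the request and then appends the API control parameters; the test suite further down asserts that stream and selectedOutputs land as root-level keys. A sketch of the resulting payload (values hypothetical):

input_data = {'message': 'test'}

body = input_data.copy() if input_data is not None else {}
body['stream'] = True
body['selectedOutputs'] = ['agent1.content', 'agent2.content']
# POSTed as: {'message': 'test', 'stream': True,
#             'selectedOutputs': ['agent1.content', 'agent2.content']}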
@@ -320,18 +320,20 @@ class SimStudioClient:
     def execute_workflow_sync(
         self,
         workflow_id: str,
-        input: Optional[Any] = None,
-        *,
+        input_data: Optional[Dict[str, Any]] = None,
         timeout: float = 30.0,
         stream: Optional[bool] = None,
         selected_outputs: Optional[list] = None
     ) -> WorkflowExecutionResult:
         """
-        Execute a workflow synchronously (ensures non-async mode).
+        Execute a workflow and poll for completion (useful for long-running workflows).

+        Note: Currently, the API is synchronous, so this method just calls execute_workflow.
+        In the future, if async execution is added, this method can be enhanced.
+
         Args:
             workflow_id: The ID of the workflow to execute
-            input: Input data to pass to the workflow (can include file-like objects)
+            input_data: Input data to pass to the workflow (can include file-like objects)
             timeout: Timeout for the initial request in seconds
             stream: Enable streaming responses (default: None)
             selected_outputs: Block outputs to stream (e.g., ["agent1.content"])

@@ -342,14 +344,9 @@ class SimStudioClient:
         Raises:
             SimStudioError: If the workflow execution fails
         """
-        return self.execute_workflow(
-            workflow_id,
-            input,
-            timeout=timeout,
-            stream=stream,
-            selected_outputs=selected_outputs,
-            async_execution=False
-        )
+        # For now, the API is synchronous, so we just execute directly
+        # In the future, if async execution is added, this method can be enhanced
+        return self.execute_workflow(workflow_id, input_data, timeout, stream, selected_outputs)

     def set_api_key(self, api_key: str) -> None:
         """

@@ -413,8 +410,7 @@ class SimStudioClient:
     def execute_with_retry(
         self,
         workflow_id: str,
-        input: Optional[Any] = None,
-        *,
+        input_data: Optional[Dict[str, Any]] = None,
         timeout: float = 30.0,
         stream: Optional[bool] = None,
         selected_outputs: Optional[list] = None,

@@ -429,7 +425,7 @@ class SimStudioClient:

         Args:
             workflow_id: The ID of the workflow to execute
-            input: Input data to pass to the workflow (can include file-like objects)
+            input_data: Input data to pass to the workflow (can include file-like objects)
             timeout: Timeout in seconds
             stream: Enable streaming responses
             selected_outputs: Block outputs to stream

@@ -452,11 +448,11 @@ class SimStudioClient:
             try:
                 return self.execute_workflow(
                     workflow_id,
-                    input,
-                    timeout=timeout,
-                    stream=stream,
-                    selected_outputs=selected_outputs,
-                    async_execution=async_execution
+                    input_data,
+                    timeout,
+                    stream,
+                    selected_outputs,
+                    async_execution
                 )
             except SimStudioError as e:
                 if e.code != 'RATE_LIMIT_EXCEEDED':
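Taken together, these hunks restore the keyword-style call shape exercised by the tests that follow. For example (a sketch assuming a configured client and deployed workflow; the arguments match the test assertions below):

result = client.execute_with_retry(
    "workflow-id",
    input_data={"message": "test"},
    max_retries=3,
    initial_delay=0.01,
)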
@@ -91,9 +91,11 @@ def test_context_manager(mock_close):
     """Test SimStudioClient as context manager."""
     with SimStudioClient(api_key="test-api-key") as client:
         assert client.api_key == "test-api-key"
+    # Should close without error
     mock_close.assert_called_once()


+# Tests for async execution
 @patch('simstudio.requests.Session.post')
 def test_async_execution_returns_task_id(mock_post):
     """Test async execution returns AsyncExecutionResult."""

@@ -113,7 +115,7 @@ def test_async_execution_returns_task_id(mock_post):
     client = SimStudioClient(api_key="test-api-key")
     result = client.execute_workflow(
         "workflow-id",
-        {"message": "Hello"},
+        input_data={"message": "Hello"},
         async_execution=True
     )

@@ -122,6 +124,7 @@ def test_async_execution_returns_task_id(mock_post):
     assert result.status == "queued"
     assert result.links["status"] == "/api/jobs/task-123"

+    # Verify X-Execution-Mode header was set
     call_args = mock_post.call_args
     assert call_args[1]["headers"]["X-Execution-Mode"] == "async"

@@ -143,7 +146,7 @@ def test_sync_execution_returns_result(mock_post):
     client = SimStudioClient(api_key="test-api-key")
     result = client.execute_workflow(
         "workflow-id",
-        {"message": "Hello"},
+        input_data={"message": "Hello"},
         async_execution=False
     )

@@ -163,12 +166,13 @@ def test_async_header_not_set_when_false(mock_post):
     mock_post.return_value = mock_response

     client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", {"message": "Hello"})
+    client.execute_workflow("workflow-id", input_data={"message": "Hello"})

     call_args = mock_post.call_args
     assert "X-Execution-Mode" not in call_args[1]["headers"]


+# Tests for job status
 @patch('simstudio.requests.Session.get')
 def test_get_job_status_success(mock_get):
     """Test getting job status."""

@@ -218,6 +222,7 @@ def test_get_job_status_not_found(mock_get):
     assert "Job not found" in str(exc_info.value)


+# Tests for retry with rate limiting
 @patch('simstudio.requests.Session.post')
 @patch('simstudio.time.sleep')
 def test_execute_with_retry_success_first_attempt(mock_sleep, mock_post):

@@ -233,7 +238,7 @@ def test_execute_with_retry_success_first_attempt(mock_sleep, mock_post):
     mock_post.return_value = mock_response

     client = SimStudioClient(api_key="test-api-key")
-    result = client.execute_with_retry("workflow-id", {"message": "test"})
+    result = client.execute_with_retry("workflow-id", input_data={"message": "test"})

     assert result.success is True
     assert mock_post.call_count == 1

@@ -273,7 +278,7 @@ def test_execute_with_retry_retries_on_rate_limit(mock_sleep, mock_post):
     client = SimStudioClient(api_key="test-api-key")
     result = client.execute_with_retry(
         "workflow-id",
-        {"message": "test"},
+        input_data={"message": "test"},
         max_retries=3,
         initial_delay=0.01
     )

@@ -302,7 +307,7 @@ def test_execute_with_retry_max_retries_exceeded(mock_sleep, mock_post):
     with pytest.raises(SimStudioError) as exc_info:
         client.execute_with_retry(
             "workflow-id",
-            {"message": "test"},
+            input_data={"message": "test"},
             max_retries=2,
             initial_delay=0.01
         )

@@ -328,12 +333,13 @@ def test_execute_with_retry_no_retry_on_other_errors(mock_post):
     client = SimStudioClient(api_key="test-api-key")

     with pytest.raises(SimStudioError) as exc_info:
-        client.execute_with_retry("workflow-id", {"message": "test"})
+        client.execute_with_retry("workflow-id", input_data={"message": "test"})

     assert "Server error" in str(exc_info.value)
     assert mock_post.call_count == 1  # No retries


+# Tests for rate limit info
 def test_get_rate_limit_info_returns_none_initially():
     """Test rate limit info is None before any API calls."""
     client = SimStudioClient(api_key="test-api-key")

@@ -356,7 +362,7 @@ def test_get_rate_limit_info_after_api_call(mock_post):
     mock_post.return_value = mock_response

     client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", {})
+    client.execute_workflow("workflow-id", input_data={})

     info = client.get_rate_limit_info()
     assert info is not None

@@ -365,6 +371,7 @@ def test_get_rate_limit_info_after_api_call(mock_post):
     assert info.reset == 1704067200


+# Tests for usage limits
 @patch('simstudio.requests.Session.get')
 def test_get_usage_limits_success(mock_get):
     """Test getting usage limits."""

@@ -428,6 +435,7 @@ def test_get_usage_limits_unauthorized(mock_get):
     assert "Invalid API key" in str(exc_info.value)


+# Tests for streaming with selectedOutputs
 @patch('simstudio.requests.Session.post')
 def test_execute_workflow_with_stream_and_selected_outputs(mock_post):
     """Test execution with stream and selectedOutputs parameters."""

@@ -441,7 +449,7 @@ def test_execute_workflow_with_stream_and_selected_outputs(mock_post):
     client = SimStudioClient(api_key="test-api-key")
     client.execute_workflow(
         "workflow-id",
-        {"message": "test"},
+        input_data={"message": "test"},
         stream=True,
         selected_outputs=["agent1.content", "agent2.content"]
     )

@@ -451,85 +459,4 @@ def test_execute_workflow_with_stream_and_selected_outputs(mock_post):

     assert request_body["message"] == "test"
     assert request_body["stream"] is True
     assert request_body["selectedOutputs"] == ["agent1.content", "agent2.content"]
-
-
-# Tests for primitive and list inputs
-@patch('simstudio.requests.Session.post')
-def test_execute_workflow_with_string_input(mock_post):
-    """Test execution with primitive string input wraps in input field."""
-    mock_response = Mock()
-    mock_response.ok = True
-    mock_response.status_code = 200
-    mock_response.json.return_value = {"success": True, "output": {}}
-    mock_response.headers.get.return_value = None
-    mock_post.return_value = mock_response
-
-    client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", "NVDA")
-
-    call_args = mock_post.call_args
-    request_body = call_args[1]["json"]
-
-    assert request_body["input"] == "NVDA"
-    assert "0" not in request_body  # Should not spread string characters
-
-
-@patch('simstudio.requests.Session.post')
-def test_execute_workflow_with_number_input(mock_post):
-    """Test execution with primitive number input wraps in input field."""
-    mock_response = Mock()
-    mock_response.ok = True
-    mock_response.status_code = 200
-    mock_response.json.return_value = {"success": True, "output": {}}
-    mock_response.headers.get.return_value = None
-    mock_post.return_value = mock_response
-
-    client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", 42)
-
-    call_args = mock_post.call_args
-    request_body = call_args[1]["json"]
-
-    assert request_body["input"] == 42
-
-
-@patch('simstudio.requests.Session.post')
-def test_execute_workflow_with_list_input(mock_post):
-    """Test execution with list input wraps in input field."""
-    mock_response = Mock()
-    mock_response.ok = True
-    mock_response.status_code = 200
-    mock_response.json.return_value = {"success": True, "output": {}}
-    mock_response.headers.get.return_value = None
-    mock_post.return_value = mock_response
-
-    client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", ["NVDA", "AAPL", "GOOG"])
-
-    call_args = mock_post.call_args
-    request_body = call_args[1]["json"]
-
-    assert request_body["input"] == ["NVDA", "AAPL", "GOOG"]
-    assert "0" not in request_body  # Should not spread list
-
-
-@patch('simstudio.requests.Session.post')
-def test_execute_workflow_with_dict_input_spreads_at_root(mock_post):
-    """Test execution with dict input spreads at root level."""
-    mock_response = Mock()
-    mock_response.ok = True
-    mock_response.status_code = 200
-    mock_response.json.return_value = {"success": True, "output": {}}
-    mock_response.headers.get.return_value = None
-    mock_post.return_value = mock_response
-
-    client = SimStudioClient(api_key="test-api-key")
-    client.execute_workflow("workflow-id", {"ticker": "NVDA", "quantity": 100})
-
-    call_args = mock_post.call_args
-    request_body = call_args[1]["json"]
-
-    assert request_body["ticker"] == "NVDA"
-    assert request_body["quantity"] == 100
-    assert "input" not in request_body  # Should not wrap in input field
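The async-execution tests above imply the following end-to-end flow. This is a sketch: the "queued" status and the /api/jobs/<id> status route come from the assertions, while the task_id attribute name and the client setup are assumed.

result = client.execute_workflow(
    "workflow-id",
    input_data={"message": "Hello"},
    async_execution=True,
)
# result.status == "queued" per the assertions above; poll until done
status = client.get_job_status(result.task_id)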
@@ -71,19 +71,6 @@ vi.mock('@/executor/path')
 vi.mock('@/executor/resolver', () => ({
   InputResolver: vi.fn(),
 }))
-vi.mock('@/executor/utils/http', () => ({
-  buildAuthHeaders: vi.fn().mockResolvedValue({ 'Content-Type': 'application/json' }),
-  buildAPIUrl: vi.fn((path: string) => new URL(path, 'http://localhost:3000')),
-  extractAPIErrorMessage: vi.fn(async (response: Response) => {
-    const defaultMessage = `API request failed with status ${response.status}`
-    try {
-      const errorData = await response.json()
-      return errorData.error || defaultMessage
-    } catch {
-      return defaultMessage
-    }
-  }),
-}))

 // Specific block utilities
 vi.mock('@/blocks/blocks/router')
@@ -47,35 +47,24 @@ new SimStudioClient(config: SimStudioConfig)

 #### Methods

-##### executeWorkflow(workflowId, input?, options?)
+##### executeWorkflow(workflowId, options?)

 Execute a workflow with optional input data.

 ```typescript
-// With object input (spread at root level of request body)
 const result = await client.executeWorkflow('workflow-id', {
-  message: 'Hello, world!'
-});
-
-// With primitive input (wrapped as { input: value })
-const result = await client.executeWorkflow('workflow-id', 'NVDA');
-
-// With options
-const result = await client.executeWorkflow('workflow-id', { message: 'Hello' }, {
-  timeout: 60000
+  input: { message: 'Hello, world!' },
+  timeout: 30000 // 30 seconds
 });
 ```

 **Parameters:**
 - `workflowId` (string): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow. Objects are spread at the root level, primitives/arrays are wrapped in `{ input: value }`. File objects are automatically converted to base64.
 - `options` (ExecutionOptions, optional):
+  - `input` (any): Input data to pass to the workflow. File objects are automatically converted to base64.
   - `timeout` (number): Timeout in milliseconds (default: 30000)
-  - `stream` (boolean): Enable streaming responses
-  - `selectedOutputs` (string[]): Block outputs to stream (e.g., `["agent1.content"]`)
-  - `async` (boolean): Execute asynchronously and return execution ID

-**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
+**Returns:** `Promise<WorkflowExecutionResult>`

 ##### getWorkflowStatus(workflowId)

@@ -107,89 +96,25 @@ if (isReady) {

 **Returns:** `Promise<boolean>`

-##### executeWorkflowSync(workflowId, input?, options?)
+##### executeWorkflowSync(workflowId, options?)

 Execute a workflow and poll for completion (useful for long-running workflows).

 ```typescript
-const result = await client.executeWorkflowSync('workflow-id', { data: 'some input' }, {
+const result = await client.executeWorkflowSync('workflow-id', {
+  input: { data: 'some input' },
   timeout: 60000
 });
 ```

 **Parameters:**
 - `workflowId` (string): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow
 - `options` (ExecutionOptions, optional):
+  - `input` (any): Input data to pass to the workflow
   - `timeout` (number): Timeout for the initial request in milliseconds

 **Returns:** `Promise<WorkflowExecutionResult>`

-##### getJobStatus(taskId)
-
-Get the status of an async job.
-
-```typescript
-const status = await client.getJobStatus('task-id-from-async-execution');
-console.log('Job status:', status);
-```
-
-**Parameters:**
-- `taskId` (string): The task ID returned from async execution
-
-**Returns:** `Promise<any>`
-
-##### executeWithRetry(workflowId, input?, options?, retryOptions?)
-
-Execute a workflow with automatic retry on rate limit errors.
-
-```typescript
-const result = await client.executeWithRetry('workflow-id', { message: 'Hello' }, {
-  timeout: 30000
-}, {
-  maxRetries: 3,
-  initialDelay: 1000,
-  maxDelay: 30000,
-  backoffMultiplier: 2
-});
-```
-
-**Parameters:**
-- `workflowId` (string): The ID of the workflow to execute
-- `input` (any, optional): Input data to pass to the workflow
-- `options` (ExecutionOptions, optional): Execution options
-- `retryOptions` (RetryOptions, optional):
-  - `maxRetries` (number): Maximum retry attempts (default: 3)
-  - `initialDelay` (number): Initial delay in ms (default: 1000)
-  - `maxDelay` (number): Maximum delay in ms (default: 30000)
-  - `backoffMultiplier` (number): Backoff multiplier (default: 2)
-
-**Returns:** `Promise<WorkflowExecutionResult | AsyncExecutionResult>`
-
-##### getRateLimitInfo()
-
-Get current rate limit information from the last API response.
-
-```typescript
-const rateInfo = client.getRateLimitInfo();
-if (rateInfo) {
-  console.log('Remaining requests:', rateInfo.remaining);
-}
-```
-
-**Returns:** `RateLimitInfo | null`
-
-##### getUsageLimits()
-
-Get current usage limits and quota information.
-
-```typescript
-const limits = await client.getUsageLimits();
-console.log('Current usage:', limits.usage);
-```
-
-**Returns:** `Promise<UsageLimits>`
-
 ##### setApiKey(apiKey)

 Update the API key.

@@ -245,81 +170,6 @@ class SimStudioError extends Error {
 }
 ```

-### AsyncExecutionResult
-
-```typescript
-interface AsyncExecutionResult {
-  success: boolean;
-  taskId: string;
-  status: 'queued';
-  createdAt: string;
-  links: {
-    status: string;
-  };
-}
-```
-
-### RateLimitInfo
-
-```typescript
-interface RateLimitInfo {
-  limit: number;
-  remaining: number;
-  reset: number;
-  retryAfter?: number;
-}
-```
-
-### UsageLimits
-
-```typescript
-interface UsageLimits {
-  success: boolean;
-  rateLimit: {
-    sync: {
-      isLimited: boolean;
-      limit: number;
-      remaining: number;
-      resetAt: string;
-    };
-    async: {
-      isLimited: boolean;
-      limit: number;
-      remaining: number;
-      resetAt: string;
-    };
-    authType: string;
-  };
-  usage: {
-    currentPeriodCost: number;
-    limit: number;
-    plan: string;
-  };
-}
-```
-
-### ExecutionOptions
-
-```typescript
-interface ExecutionOptions {
-  timeout?: number;
-  stream?: boolean;
-  selectedOutputs?: string[];
-  async?: boolean;
-}
-```
-
-### RetryOptions
-
-```typescript
-interface RetryOptions {
-  maxRetries?: number;
-  initialDelay?: number;
-  maxDelay?: number;
-  backoffMultiplier?: number;
-}
-```
-
 ## Examples

 ### Basic Workflow Execution

@@ -341,8 +191,10 @@ async function runWorkflow() {

   // Execute the workflow
   const result = await client.executeWorkflow('my-workflow-id', {
-    message: 'Process this data',
-    userId: '12345'
+    input: {
+      message: 'Process this data',
+      userId: '12345'
+    }
   });

   if (result.success) {

@@ -446,18 +298,22 @@ const file = new File([fileBuffer], 'document.pdf', { type: 'application/pdf' })

 // Include files under the field name from your API trigger's input format
 const result = await client.executeWorkflow('workflow-id', {
-  documents: [file], // Field name must match your API trigger's file input field
-  instructions: 'Process this document'
+  input: {
+    documents: [file], // Field name must match your API trigger's file input field
+    instructions: 'Process this document'
+  }
 });

 // Browser: From file input
 const handleFileUpload = async (event: Event) => {
-  const inputEl = event.target as HTMLInputElement;
-  const files = Array.from(inputEl.files || []);
+  const input = event.target as HTMLInputElement;
+  const files = Array.from(input.files || []);

   const result = await client.executeWorkflow('workflow-id', {
-    attachments: files, // Field name must match your API trigger's file input field
-    query: 'Analyze these files'
+    input: {
+      attachments: files, // Field name must match your API trigger's file input field
+      query: 'Analyze these files'
+    }
   });
 };
 ```
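For readers comparing the two SDKs: the TypeScript side now nests input inside the options object, while the Python SDK (diffed earlier) keeps input_data as a named parameter. The equivalent Python call, for comparison (a sketch assuming a configured client; note timeouts are seconds here, milliseconds in TypeScript):

result = client.execute_workflow(
    'workflow-id',
    input_data={'message': 'Hello, world!'},
    timeout=30.0,
)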
@@ -1,6 +1,6 @@
 {
   "name": "simstudio-ts-sdk",
-  "version": "0.1.2",
+  "version": "0.1.1",
   "description": "Sim SDK - Execute workflows programmatically",
   "type": "module",
   "exports": {
@@ -119,11 +119,10 @@ describe('SimStudioClient', () => {
       }
       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      const result = await client.executeWorkflow(
-        'workflow-id',
-        { message: 'Hello' },
-        { async: true }
-      )
+      const result = await client.executeWorkflow('workflow-id', {
+        input: { message: 'Hello' },
+        async: true,
+      })

      expect(result).toHaveProperty('taskId', 'task-123')
      expect(result).toHaveProperty('status', 'queued')

@@ -153,11 +152,10 @@ describe('SimStudioClient', () => {
       }
       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      const result = await client.executeWorkflow(
-        'workflow-id',
-        { message: 'Hello' },
-        { async: false }
-      )
+      const result = await client.executeWorkflow('workflow-id', {
+        input: { message: 'Hello' },
+        async: false,
+      })

      expect(result).toHaveProperty('success', true)
      expect(result).toHaveProperty('output')

@@ -179,7 +177,9 @@ describe('SimStudioClient', () => {
       }
       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      await client.executeWorkflow('workflow-id', { message: 'Hello' })
+      await client.executeWorkflow('workflow-id', {
+        input: { message: 'Hello' },
+      })

      const calls = vi.mocked(fetch.default).mock.calls
      expect(calls[0][1]?.headers).not.toHaveProperty('X-Execution-Mode')

@@ -256,7 +256,9 @@ describe('SimStudioClient', () => {
       }
       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      const result = await client.executeWithRetry('workflow-id', { message: 'test' })
+      const result = await client.executeWithRetry('workflow-id', {
+        input: { message: 'test' },
+      })

      expect(result).toHaveProperty('success', true)
      expect(vi.mocked(fetch.default)).toHaveBeenCalledTimes(1)

@@ -303,8 +305,7 @@ describe('SimStudioClient', () => {

       const result = await client.executeWithRetry(
         'workflow-id',
-        { message: 'test' },
-        {},
+        { input: { message: 'test' } },
         { maxRetries: 3, initialDelay: 10 }
       )

@@ -335,8 +336,7 @@ describe('SimStudioClient', () => {
       await expect(
         client.executeWithRetry(
           'workflow-id',
-          { message: 'test' },
-          {},
+          { input: { message: 'test' } },
           { maxRetries: 2, initialDelay: 10 }
         )
       ).rejects.toThrow('Rate limit exceeded')

@@ -361,9 +361,9 @@ describe('SimStudioClient', () => {

       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      await expect(client.executeWithRetry('workflow-id', { message: 'test' })).rejects.toThrow(
-        'Server error'
-      )
+      await expect(
+        client.executeWithRetry('workflow-id', { input: { message: 'test' } })
+      ).rejects.toThrow('Server error')

      expect(vi.mocked(fetch.default)).toHaveBeenCalledTimes(1) // No retries
     })

@@ -393,7 +393,7 @@ describe('SimStudioClient', () => {

       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      await client.executeWorkflow('workflow-id', {})
+      await client.executeWorkflow('workflow-id', { input: {} })

      const info = client.getRateLimitInfo()
      expect(info).not.toBeNull()

@@ -490,11 +490,11 @@ describe('SimStudioClient', () => {

       vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)

-      await client.executeWorkflow(
-        'workflow-id',
-        { message: 'test' },
-        { stream: true, selectedOutputs: ['agent1.content', 'agent2.content'] }
-      )
+      await client.executeWorkflow('workflow-id', {
+        input: { message: 'test' },
+        stream: true,
+        selectedOutputs: ['agent1.content', 'agent2.content'],
+      })

      const calls = vi.mocked(fetch.default).mock.calls
      const requestBody = JSON.parse(calls[0][1]?.body as string)

@@ -505,134 +505,6 @@ describe('SimStudioClient', () => {
       expect(requestBody.selectedOutputs).toEqual(['agent1.content', 'agent2.content'])
     })
   })
-
-  describe('executeWorkflow - primitive and array inputs', () => {
-    it('should wrap primitive string input in input field', async () => {
-      const fetch = await import('node-fetch')
-      const mockResponse = {
-        ok: true,
-        status: 200,
-        json: vi.fn().mockResolvedValue({
-          success: true,
-          output: {},
-        }),
-        headers: {
-          get: vi.fn().mockReturnValue(null),
-        },
-      }
-
-      vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)
-
-      await client.executeWorkflow('workflow-id', 'NVDA')
-
-      const calls = vi.mocked(fetch.default).mock.calls
-      const requestBody = JSON.parse(calls[0][1]?.body as string)
-
-      expect(requestBody).toHaveProperty('input', 'NVDA')
-      expect(requestBody).not.toHaveProperty('0') // Should not spread string characters
-    })
-
-    it('should wrap primitive number input in input field', async () => {
-      const fetch = await import('node-fetch')
-      const mockResponse = {
-        ok: true,
-        status: 200,
-        json: vi.fn().mockResolvedValue({
-          success: true,
-          output: {},
-        }),
-        headers: {
-          get: vi.fn().mockReturnValue(null),
-        },
-      }
-
-      vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)
-
-      await client.executeWorkflow('workflow-id', 42)
-
-      const calls = vi.mocked(fetch.default).mock.calls
-      const requestBody = JSON.parse(calls[0][1]?.body as string)
-
-      expect(requestBody).toHaveProperty('input', 42)
-    })
-
-    it('should wrap array input in input field', async () => {
-      const fetch = await import('node-fetch')
-      const mockResponse = {
-        ok: true,
-        status: 200,
-        json: vi.fn().mockResolvedValue({
-          success: true,
-          output: {},
-        }),
-        headers: {
-          get: vi.fn().mockReturnValue(null),
-        },
-      }
-
-      vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)
-
-      await client.executeWorkflow('workflow-id', ['NVDA', 'AAPL', 'GOOG'])
-
-      const calls = vi.mocked(fetch.default).mock.calls
-      const requestBody = JSON.parse(calls[0][1]?.body as string)
-
-      expect(requestBody).toHaveProperty('input')
-      expect(requestBody.input).toEqual(['NVDA', 'AAPL', 'GOOG'])
-      expect(requestBody).not.toHaveProperty('0') // Should not spread array
-    })
-
-    it('should spread object input at root level', async () => {
-      const fetch = await import('node-fetch')
-      const mockResponse = {
-        ok: true,
-        status: 200,
-        json: vi.fn().mockResolvedValue({
-          success: true,
-          output: {},
-        }),
-        headers: {
-          get: vi.fn().mockReturnValue(null),
-        },
-      }
-
-      vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)
-
-      await client.executeWorkflow('workflow-id', { ticker: 'NVDA', quantity: 100 })
-
-      const calls = vi.mocked(fetch.default).mock.calls
-      const requestBody = JSON.parse(calls[0][1]?.body as string)
-
-      expect(requestBody).toHaveProperty('ticker', 'NVDA')
-      expect(requestBody).toHaveProperty('quantity', 100)
-      expect(requestBody).not.toHaveProperty('input') // Should not wrap in input field
-    })
-
-    it('should handle null input as no input (empty body)', async () => {
-      const fetch = await import('node-fetch')
-      const mockResponse = {
-        ok: true,
-        status: 200,
-        json: vi.fn().mockResolvedValue({
-          success: true,
-          output: {},
-        }),
-        headers: {
-          get: vi.fn().mockReturnValue(null),
-        },
-      }
-
-      vi.mocked(fetch.default).mockResolvedValue(mockResponse as any)
-
-      await client.executeWorkflow('workflow-id', null)
-
-      const calls = vi.mocked(fetch.default).mock.calls
-      const requestBody = JSON.parse(calls[0][1]?.body as string)
-
-      // null treated as "no input" - sends empty body (consistent with Python SDK)
-      expect(requestBody).toEqual({})
-    })
-  })
 })

 describe('SimStudioError', () => {
@@ -26,6 +26,7 @@ export interface WorkflowStatus {
 }

 export interface ExecutionOptions {
+  input?: any
   timeout?: number
   stream?: boolean
   selectedOutputs?: string[]

@@ -116,6 +117,10 @@ export class SimStudioClient {
     this.baseUrl = normalizeBaseUrl(config.baseUrl || 'https://sim.ai')
   }

+  /**
+   * Execute a workflow with optional input data
+   * If async is true, returns immediately with a task ID
+   */
   /**
    * Convert File objects in input to API format (base64)
    * Recursively processes nested objects and arrays

@@ -165,25 +170,20 @@ export class SimStudioClient {
     return value
   }

-  /**
-   * Execute a workflow with optional input data
-   * @param workflowId - The ID of the workflow to execute
-   * @param input - Input data to pass to the workflow (object, primitive, or array)
-   * @param options - Execution options (timeout, stream, async, etc.)
-   */
   async executeWorkflow(
     workflowId: string,
-    input?: any,
     options: ExecutionOptions = {}
   ): Promise<WorkflowExecutionResult | AsyncExecutionResult> {
     const url = `${this.baseUrl}/api/workflows/${workflowId}/execute`
-    const { timeout = 30000, stream, selectedOutputs, async } = options
+    const { input, timeout = 30000, stream, selectedOutputs, async } = options

     try {
+      // Create a timeout promise
       const timeoutPromise = new Promise<never>((_, reject) => {
         setTimeout(() => reject(new Error('TIMEOUT')), timeout)
       })

+      // Build headers - async execution uses X-Execution-Mode header
       const headers: Record<string, string> = {
         'Content-Type': 'application/json',
         'X-API-Key': this.apiKey,

@@ -192,15 +192,10 @@ export class SimStudioClient {
         headers['X-Execution-Mode'] = 'async'
       }

-      let jsonBody: any = {}
-      if (input !== undefined && input !== null) {
-        if (typeof input === 'object' && input !== null && !Array.isArray(input)) {
-          jsonBody = { ...input }
-        } else {
-          jsonBody = { input }
-        }
-      }
+      // Build JSON body - spread input at root level, then add API control parameters
+      let jsonBody: any = input !== undefined ? { ...input } : {}

+      // Convert any File objects in the input to base64 format
       jsonBody = await this.convertFilesToBase64(jsonBody)

       if (stream !== undefined) {

@@ -218,8 +213,10 @@ export class SimStudioClient {

       const response = await Promise.race([fetchPromise, timeoutPromise])

+      // Extract rate limit headers
       this.updateRateLimitInfo(response)

+      // Handle rate limiting with retry
       if (response.status === 429) {
         const retryAfter = this.rateLimitInfo?.retryAfter || 1000
         throw new SimStudioError(

@@ -288,18 +285,15 @@ export class SimStudioClient {
   }

   /**
-   * Execute a workflow synchronously (ensures non-async mode)
-   * @param workflowId - The ID of the workflow to execute
-   * @param input - Input data to pass to the workflow
-   * @param options - Execution options (timeout, stream, etc.)
+   * Execute a workflow and poll for completion (useful for long-running workflows)
    */
   async executeWorkflowSync(
     workflowId: string,
-    input?: any,
     options: ExecutionOptions = {}
   ): Promise<WorkflowExecutionResult> {
+    // Ensure sync mode by explicitly setting async to false
     const syncOptions = { ...options, async: false }
-    return this.executeWorkflow(workflowId, input, syncOptions) as Promise<WorkflowExecutionResult>
+    return this.executeWorkflow(workflowId, syncOptions) as Promise<WorkflowExecutionResult>
   }

   /**

@@ -367,14 +361,9 @@ export class SimStudioClient {

   /**
    * Execute workflow with automatic retry on rate limit
-   * @param workflowId - The ID of the workflow to execute
-   * @param input - Input data to pass to the workflow
-   * @param options - Execution options (timeout, stream, async, etc.)
-   * @param retryOptions - Retry configuration (maxRetries, delays, etc.)
    */
   async executeWithRetry(
     workflowId: string,
-    input?: any,
     options: ExecutionOptions = {},
     retryOptions: RetryOptions = {}
   ): Promise<WorkflowExecutionResult | AsyncExecutionResult> {

@@ -390,7 +379,7 @@ export class SimStudioClient {

     for (let attempt = 0; attempt <= maxRetries; attempt++) {
       try {
-        return await this.executeWorkflow(workflowId, input, options)
+        return await this.executeWorkflow(workflowId, options)
       } catch (error: any) {
         if (!(error instanceof SimStudioError) || error.code !== 'RATE_LIMIT_EXCEEDED') {
           throw error

@@ -398,19 +387,23 @@ export class SimStudioClient {

         lastError = error

+        // Don't retry after last attempt
         if (attempt === maxRetries) {
           break
         }

+        // Use retry-after if provided, otherwise use exponential backoff
         const waitTime =
           error.status === 429 && this.rateLimitInfo?.retryAfter
             ? this.rateLimitInfo.retryAfter
             : Math.min(delay, maxDelay)

+        // Add jitter (±25%)
         const jitter = waitTime * (0.75 + Math.random() * 0.5)

         await new Promise((resolve) => setTimeout(resolve, jitter))

+        // Exponential backoff for next attempt
         delay *= backoffMultiplier
       }
     }

@@ -482,4 +475,5 @@ export class SimStudioClient {
   }
 }

+// Export types and classes
 export { SimStudioClient as default }
@@ -126,7 +126,7 @@ async function fetchGitHubCommitDetails(

   const githubUsername = commit.author?.login || commit.committer?.login || 'unknown'

-  let cleanMessage = commit.commit.message.split('\n')[0]
+  let cleanMessage = commit.commit.message.split('\n')[0] // First line only
   if (prNumber) {
     cleanMessage = cleanMessage.replace(/\s*\(#\d+\)\s*$/, '')
   }

@@ -226,23 +226,12 @@ async function getCommitsBetweenVersions(
 function categorizeCommit(message: string): 'features' | 'fixes' | 'improvements' | 'other' {
   const msgLower = message.toLowerCase()

-  if (/^feat(\(|:|!)/.test(msgLower)) {
-    return 'features'
-  }
-
-  if (/^fix(\(|:|!)/.test(msgLower)) {
-    return 'fixes'
-  }
-
-  if (/^(improvement|improve|perf|refactor)(\(|:|!)/.test(msgLower)) {
-    return 'improvements'
-  }
-
-  if (/^(chore|docs|style|test|ci|build)(\(|:|!)/.test(msgLower)) {
-    return 'other'
-  }
-
-  if (msgLower.includes('feat') || msgLower.includes('implement') || msgLower.includes('new ')) {
+  if (
+    msgLower.includes('feat') ||
+    msgLower.includes('add') ||
+    msgLower.includes('implement') ||
+    msgLower.includes('new ')
+  ) {
     return 'features'
   }

@@ -253,10 +242,9 @@ function categorizeCommit(message: string): 'features' | 'fixes' | 'improvements' | 'other' {
   if (
     msgLower.includes('improve') ||
     msgLower.includes('enhance') ||
+    msgLower.includes('update') ||
     msgLower.includes('upgrade') ||
-    msgLower.includes('optimization') ||
-    msgLower.includes('add') ||
-    msgLower.includes('update')
+    msgLower.includes('optimization')
   ) {
     return 'improvements'
   }
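The categorization change swaps anchored conventional-commit prefixes for substring matching, which widens what counts as a feature. A Python sketch of the behavioral difference (illustrative only; the script itself is TypeScript, and the sample message is hypothetical):

import re

msg = "Add retry logic to SDK client".lower()

old_is_feature = bool(re.match(r'^feat(\(|:|!)', msg))  # False: no "feat" prefix
new_is_feature = 'add' in msg                           # True: substring match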