v0.6.44: streamdown, mothership intelligence, excel extension
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Video } from '@/components/ui/video'
|
||||
|
||||
Sim provides a comprehensive external API for querying workflow execution logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
Sim provides a comprehensive external API for querying workflow run logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
|
||||
## Authentication
|
||||
|
||||
@@ -21,7 +21,7 @@ You can generate API keys from the Sim platform and navigate to **Settings**, th
|
||||
|
||||
## Logs API
|
||||
|
||||
All API responses include information about your workflow execution limits and usage:
|
||||
All API responses include information about your workflow run limits and usage:
|
||||
|
||||
```json
|
||||
"limits": {
|
||||
@@ -48,11 +48,11 @@ All API responses include information about your workflow execution limits and u
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow executions. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow runs. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
|
||||
### Query Logs
|
||||
|
||||
Query workflow execution logs with extensive filtering options.
|
||||
Query workflow run logs with extensive filtering options.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -70,11 +70,11 @@ Query workflow execution logs with extensive filtering options.
|
||||
- `level` - Filter by level: `info`, `error`
|
||||
- `startDate` - ISO timestamp for date range start
|
||||
- `endDate` - ISO timestamp for date range end
|
||||
- `executionId` - Exact execution ID match
|
||||
- `minDurationMs` - Minimum execution duration in milliseconds
|
||||
- `maxDurationMs` - Maximum execution duration in milliseconds
|
||||
- `minCost` - Minimum execution cost
|
||||
- `maxCost` - Maximum execution cost
|
||||
- `executionId` - Exact run ID match
|
||||
- `minDurationMs` - Minimum run duration in milliseconds
|
||||
- `maxDurationMs` - Maximum run duration in milliseconds
|
||||
- `minCost` - Minimum run cost
|
||||
- `maxCost` - Maximum run cost
|
||||
- `model` - Filter by AI model used
|
||||
|
||||
**Pagination:**
|
||||
@@ -213,9 +213,9 @@ Retrieve detailed information about a specific log entry.
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Get Execution Details
|
||||
### Get Run Details
|
||||
|
||||
Retrieve execution details including the workflow state snapshot.
|
||||
Retrieve run details including the workflow state snapshot.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -248,7 +248,7 @@ Retrieve execution details including the workflow state snapshot.
|
||||
|
||||
## Notifications
|
||||
|
||||
Get real-time notifications when workflow executions complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
Get real-time notifications when workflow runs complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -256,7 +256,7 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Notification Channels:**
|
||||
- **Webhook**: Send HTTP POST requests to your endpoint
|
||||
- **Email**: Receive email notifications with execution details
|
||||
- **Email**: Receive email notifications with run details
|
||||
- **Slack**: Post messages to a Slack channel
|
||||
|
||||
**Workflow Selection:**
|
||||
@@ -269,38 +269,38 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Optional Data:**
|
||||
- `includeFinalOutput`: Include the workflow's final output
|
||||
- `includeTraceSpans`: Include detailed execution trace spans
|
||||
- `includeTraceSpans`: Include detailed trace spans
|
||||
- `includeRateLimits`: Include rate limit information (sync/async limits and remaining)
|
||||
- `includeUsageData`: Include billing period usage and limits
|
||||
|
||||
### Alert Rules
|
||||
|
||||
Instead of receiving notifications for every execution, configure alert rules to be notified only when issues are detected:
|
||||
Instead of receiving notifications for every run, configure alert rules to be notified only when issues are detected:
|
||||
|
||||
**Consecutive Failures**
|
||||
- Alert after X consecutive failed executions (e.g., 3 failures in a row)
|
||||
- Resets when an execution succeeds
|
||||
- Alert after X consecutive failed runs (e.g., 3 failures in a row)
|
||||
- Resets when a run succeeds
|
||||
|
||||
**Failure Rate**
|
||||
- Alert when failure rate exceeds X% over the last Y hours
|
||||
- Requires minimum 5 executions in the window
|
||||
- Requires minimum 5 runs in the window
|
||||
- Only triggers after the full time window has elapsed
|
||||
|
||||
**Latency Threshold**
|
||||
- Alert when any execution takes longer than X seconds
|
||||
- Alert when any run takes longer than X seconds
|
||||
- Useful for catching slow or hanging workflows
|
||||
|
||||
**Latency Spike**
|
||||
- Alert when execution is X% slower than the average
|
||||
- Alert when a run is X% slower than the average
|
||||
- Compares against the average duration over the configured time window
|
||||
- Requires minimum 5 executions to establish baseline
|
||||
- Requires minimum 5 runs to establish baseline
|
||||
|
||||
**Cost Threshold**
|
||||
- Alert when a single execution costs more than $X
|
||||
- Alert when a single run costs more than $X
|
||||
- Useful for catching expensive LLM calls
|
||||
|
||||
**No Activity**
|
||||
- Alert when no executions occur within X hours
|
||||
- Alert when no runs occur within X hours
|
||||
- Useful for monitoring scheduled workflows that should run regularly
|
||||
|
||||
**Error Count**
|
||||
@@ -317,7 +317,7 @@ For webhooks, additional options are available:
|
||||
|
||||
### Payload Structure
|
||||
|
||||
When a workflow execution completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
When a workflow run completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -456,7 +456,7 @@ Failed webhook deliveries are retried with exponential backoff and jitter:
|
||||
- Deliveries timeout after 30 seconds
|
||||
|
||||
<Callout type="info">
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow execution performance.
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow run performance.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -596,11 +596,11 @@ app.listen(3000, () => {
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How do I trigger async execution via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "How do I trigger an async run via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "What authentication methods does the API support?", answer: "The API supports two authentication methods: API keys passed in the x-api-key header, and session-based authentication for logged-in users. API keys can be generated from Settings > Sim Keys in the platform. Workflows with public API access enabled can also be called without authentication." },
|
||||
{ question: "How does the webhook retry policy work?", answer: "Failed webhook deliveries are retried up to 5 times with exponential backoff: 5 seconds, 15 seconds, 1 minute, 3 minutes, and 10 minutes, plus up to 10% jitter. Only HTTP 5xx and 429 responses trigger retries. Each delivery times out after 30 seconds." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow execution rate limits, which are shown in the response body." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow run rate limits, which are shown in the response body." },
|
||||
{ question: "How do I verify that a webhook is from Sim?", answer: "Configure a webhook secret when setting up notifications. Sim signs each delivery with HMAC-SHA256 using the format 't={timestamp},v1={signature}' in the sim-signature header. Compute the HMAC of '{timestamp}.{body}' with your secret and compare it to the signature value." },
|
||||
{ question: "What alert rules are available for notifications?", answer: "You can configure alerts for consecutive failures, failure rate thresholds, latency thresholds, latency spikes (percentage above average), cost thresholds, no-activity periods, and error counts within a time window. All alert types include a 1-hour cooldown to prevent notification spam." },
|
||||
{ question: "Can I filter which executions trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
{ question: "Can I filter which runs trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Understanding how workflows execute in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
Understanding how workflows run in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
|
||||
## How Workflows Execute
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim's execution engine processes workflows intelligently by analyzing dependenci
|
||||
|
||||
### Concurrent Execution by Default
|
||||
|
||||
Multiple blocks run concurrently when they don't depend on each other. This parallel execution dramatically improves performance without requiring manual configuration.
|
||||
Multiple blocks run concurrently when they don't depend on each other. This dramatically improves performance without requiring manual configuration.
|
||||
|
||||
<Image
|
||||
src="/static/execution/concurrency.png"
|
||||
@@ -49,7 +49,7 @@ Workflows can branch in multiple directions using routing blocks. The execution
|
||||
height={500}
|
||||
/>
|
||||
|
||||
This workflow demonstrates how execution can follow different paths based on conditions or AI decisions, with each path executing independently.
|
||||
This workflow demonstrates how a run can follow different paths based on conditions or AI decisions, with each path running independently.
|
||||
|
||||
## Block Types
|
||||
|
||||
@@ -57,7 +57,7 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
|
||||
<Cards>
|
||||
<Card title="Triggers" href="/triggers">
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin execution.
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin a run.
|
||||
</Card>
|
||||
|
||||
<Card title="Processing Blocks" href="/blocks">
|
||||
@@ -73,37 +73,37 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
All blocks execute automatically based on their dependencies - you don't need to manually manage execution order or timing.
|
||||
All blocks run automatically based on their dependencies - you don't need to manually manage run order or timing.
|
||||
|
||||
## Execution Monitoring
|
||||
## Run Monitoring
|
||||
|
||||
When workflows run, Sim provides real-time visibility into the execution process:
|
||||
When workflows run, Sim provides real-time visibility into the process:
|
||||
|
||||
- **Live Block States**: See which blocks are currently executing, completed, or failed
|
||||
- **Execution Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track execution time and costs for each block
|
||||
- **Path Visualization**: Understand which execution paths were taken through your workflow
|
||||
- **Live Block States**: See which blocks are currently running, completed, or failed
|
||||
- **Run Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track run time and costs for each block
|
||||
- **Path Visualization**: Understand which paths were taken through your workflow
|
||||
|
||||
<Callout type="info">
|
||||
All execution details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
All run details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
</Callout>
|
||||
|
||||
## Key Execution Principles
|
||||
## Key Principles
|
||||
|
||||
Understanding these core principles will help you build better workflows:
|
||||
|
||||
1. **Dependency-Based Execution**: Blocks only run when all their dependencies have completed
|
||||
2. **Automatic Parallelization**: Independent blocks run concurrently without configuration
|
||||
3. **Smart Data Flow**: Outputs flow automatically to connected blocks
|
||||
4. **Error Handling**: Failed blocks stop their execution path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block executes, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to execute wins
|
||||
6. **State Persistence**: All block outputs and execution details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops
|
||||
4. **Error Handling**: Failed blocks stop their run path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block runs, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to run wins
|
||||
6. **State Persistence**: All block outputs and run details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, the run is stopped to prevent infinite loops
|
||||
|
||||
## Next Steps
|
||||
|
||||
Now that you understand execution basics, explore:
|
||||
- **[Block Types](/blocks)** - Learn about specific block capabilities
|
||||
- **[Logging](/execution/logging)** - Monitor workflow executions and debug issues
|
||||
- **[Logging](/execution/logging)** - Monitor workflow runs and debug issues
|
||||
- **[Cost Calculation](/execution/costs)** - Understand and optimize workflow costs
|
||||
- **[Triggers](/triggers)** - Set up different ways to run your workflows
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim automatically calculates costs for all workflow executions, providing transparent pricing based on AI model usage and execution charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
Sim automatically calculates costs for all workflow runs, providing transparent pricing based on AI model usage and run charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
|
||||
## Credits
|
||||
|
||||
@@ -16,18 +16,18 @@ All plan limits, usage meters, and billing thresholds are displayed in credits t
|
||||
|
||||
## How Costs Are Calculated
|
||||
|
||||
Every workflow execution includes two cost components:
|
||||
Every workflow run includes two cost components:
|
||||
|
||||
**Base Execution Charge**: 1 credit ($0.005) per execution
|
||||
**Base Run Charge**: 1 credit ($0.005) per run
|
||||
|
||||
**AI Model Usage**: Variable cost based on token consumption
|
||||
```javascript
|
||||
modelCost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000
|
||||
totalCredits = baseExecutionCharge + modelCost × 200
|
||||
totalCredits = baseRunCharge + modelCost × 200
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base execution charge.
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base run charge.
|
||||
</Callout>
|
||||
|
||||
## Model Breakdown in Logs
|
||||
@@ -48,7 +48,7 @@ The model breakdown shows:
|
||||
- **Token Usage**: Input and output token counts for each model
|
||||
- **Cost Breakdown**: Individual costs per model and operation
|
||||
- **Model Distribution**: Which models were used and how many times
|
||||
- **Total Cost**: Aggregate cost for the entire workflow execution
|
||||
- **Total Cost**: Aggregate cost for the entire workflow run
|
||||
|
||||
## Pricing Options
|
||||
|
||||
@@ -330,18 +330,18 @@ Max (individual) shares the same rate limits as team plans. Team plans (Pro or M
|
||||
|
||||
Team plans (Pro or Max for Teams) use 500 GB.
|
||||
|
||||
### Execution Time Limits
|
||||
### Run Time Limits
|
||||
|
||||
| Plan | Sync | Async |
|
||||
|------|------|-------|
|
||||
| **Free** | 5 minutes | 90 minutes |
|
||||
| **Pro / Max / Team / Enterprise** | 50 minutes | 90 minutes |
|
||||
|
||||
**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
**Sync runs** start immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async runs** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
|
||||
<Callout type="info">
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows.
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async runs or break them into smaller workflows.
|
||||
</Callout>
|
||||
|
||||
## Billing Model
|
||||
@@ -452,18 +452,18 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
|
||||
## Next Steps
|
||||
|
||||
- Review your current usage in [Settings → Subscription](https://sim.ai/settings/subscription)
|
||||
- Learn about [Logging](/execution/logging) to track execution details
|
||||
- Learn about [Logging](/execution/logging) to track run details
|
||||
- Explore the [External API](/execution/api) for programmatic cost monitoring
|
||||
- Check out [workflow optimization techniques](/blocks) to reduce costs
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How much does a single workflow execution cost?", answer: "Every execution incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base execution charge." },
|
||||
{ question: "How much does a single workflow run cost?", answer: "Every run incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base run charge." },
|
||||
{ question: "What is the credit-to-dollar conversion rate?", answer: "1 credit equals $0.005. All plan limits, usage meters, and billing thresholds in the Sim UI are displayed in credits." },
|
||||
{ question: "Do unused daily refresh credits carry over?", answer: "No. Daily refresh credits reset every 24 hours and do not accumulate. If you do not use them within the day, they are lost." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and executions will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and runs will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "How does the 1.1x hosted model multiplier work?", answer: "When you use Sim's hosted API keys (instead of bringing your own), a 1.1x multiplier is applied to the base model pricing for Agent blocks. This covers infrastructure and API management costs. You can avoid this multiplier by using your own API keys via the BYOK feature." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or VLLM, there are no API costs for those model calls. You still pay the base execution charge of 1 credit per execution." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or VLLM, there are no API costs for those model calls. You still pay the base run charge of 1 credit per run." },
|
||||
{ question: "When does threshold billing trigger?", answer: "When on-demand billing is enabled and your unbilled overage reaches $50, Sim automatically bills the full unbilled amount. This spreads large charges throughout the month instead of accumulating one large bill at period end." },
|
||||
]} />
|
||||
|
||||
@@ -156,7 +156,7 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
- **Dropbox** - Dropbox file operations
|
||||
|
||||
<Callout type="info">
|
||||
Files are automatically available to downstream blocks. The execution engine handles all file transfer and format conversion.
|
||||
Files are automatically available to downstream blocks. The engine handles all file transfer and format conversion.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -165,15 +165,15 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
|
||||
2. **Check file types** - Ensure the file type matches what the receiving block expects. The Vision block needs images, the File block handles documents.
|
||||
|
||||
3. **Consider file size** - Large files increase execution time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
3. **Consider file size** - Large files increase run time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during workflow execution is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during a workflow run is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What file input formats are supported via the API?", answer: "When triggering a workflow via API, you can send files as base64-encoded data (using a data URI with the format 'data:{mime};base64,{data}') or as a URL pointing to a publicly accessible file. In both cases, include the file name and MIME type in the request." },
|
||||
{ question: "How are files passed between blocks internally?", answer: "Files are represented as standardized UserFile objects with name, url, base64, type, and size properties. Most blocks accept the full file object and extract what they need automatically, so you typically pass the entire object rather than individual properties." },
|
||||
{ question: "Which blocks can output files?", answer: "Gmail outputs email attachments, Slack outputs downloaded files, TTS generates audio files, Video Generator and Image Generator produce media files. Storage blocks like S3, Supabase, Google Drive, and Dropbox can also retrieve files for use in downstream blocks." },
|
||||
{ question: "Do I need to extract base64 or URL from file objects manually?", answer: "No. Most blocks accept the full file object and handle the format conversion automatically. Simply pass the entire file reference (e.g., <gmail.attachments[0]>) and the receiving block will extract the data it needs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the execution engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
]} />
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows are executed in Sim.
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows run in Sim.
|
||||
|
||||
<Callout type="info">
|
||||
Every workflow execution follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
Every workflow run follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
</Callout>
|
||||
|
||||
## Documentation Overview
|
||||
@@ -22,33 +22,33 @@ Sim's execution engine brings your workflows to life by processing blocks in the
|
||||
</Card>
|
||||
|
||||
<Card title="Logging" href="/execution/logging">
|
||||
Monitor workflow executions with comprehensive logging and real-time visibility
|
||||
Monitor workflow runs with comprehensive logging and real-time visibility
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="Cost Calculation" href="/execution/costs">
|
||||
Understand how workflow execution costs are calculated and optimized
|
||||
Understand how workflow run costs are calculated and optimized
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="External API" href="/execution/api">
|
||||
Access execution logs and set up webhooks programmatically via REST API
|
||||
Access run logs and set up webhooks programmatically via REST API
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Topological Execution
|
||||
Blocks execute in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
Blocks run in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
|
||||
### Path Tracking
|
||||
The engine actively tracks execution paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks execute.
|
||||
The engine actively tracks run paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks run.
|
||||
|
||||
### Layer-Based Processing
|
||||
Instead of executing blocks one-by-one, the engine identifies layers of blocks that can run in parallel, optimizing performance for complex workflows.
|
||||
|
||||
### Execution Context
|
||||
Each workflow maintains a rich context during execution containing:
|
||||
### Run Context
|
||||
Each workflow maintains a rich context during a run containing:
|
||||
- Block outputs and states
|
||||
- Active execution paths
|
||||
- Active run paths
|
||||
- Loop and parallel iteration tracking
|
||||
- Environment variables
|
||||
- Routing decisions
|
||||
@@ -56,7 +56,7 @@ Each workflow maintains a rich context during execution containing:
|
||||
|
||||
## Deployment Snapshots
|
||||
|
||||
API, Chat, Schedule, and Webhook executions run against the workflow’s active deployment snapshot. Manual runs from the editor execute the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
API, Chat, Schedule, and Webhook runs use the workflow’s active deployment snapshot. Manual runs from the editor use the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
|
||||
<div className='flex justify-center my-6'>
|
||||
<Image
|
||||
@@ -70,9 +70,9 @@ API, Chat, Schedule, and Webhook executions run against the workflow’s active
|
||||
|
||||
The Deploy modal keeps a full version history—inspect any snapshot, compare it against your draft, and promote or roll back with one click when you need to restore a prior release.
|
||||
|
||||
## Programmatic Execution
|
||||
## Programmatic Access
|
||||
|
||||
Execute workflows from your applications using our official SDKs:
|
||||
Run workflows from your applications using our official SDKs:
|
||||
|
||||
```bash
|
||||
# TypeScript/JavaScript
|
||||
@@ -107,21 +107,21 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- Use parallel execution for independent operations
|
||||
- Cache results with Memory blocks when appropriate
|
||||
|
||||
### Monitor Executions
|
||||
### Monitor Runs
|
||||
- Review logs regularly to understand performance patterns
|
||||
- Track costs for AI model usage
|
||||
- Use workflow snapshots to debug issues
|
||||
|
||||
## What's Next?
|
||||
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your executions and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your runs and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What are the execution timeout limits?", answer: "Synchronous executions (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous executions (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "What are the run timeout limits?", answer: "Synchronous runs (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous runs (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "How does parallel execution work?", answer: "The engine identifies layers of blocks with no dependencies on each other and runs them concurrently. Within loops and parallel blocks, the engine supports up to 20 parallel branches by default and up to 1,000 loop iterations. Nested subflows (loops inside parallels, or vice versa) are supported up to 10 levels deep." },
|
||||
{ question: "Can I cancel a running execution?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel an execution, the engine checks for cancellation between block executions (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the execution returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based executions (API, chat, schedule, webhook) run against the active snapshot, not your draft canvas. Manual runs from the editor execute the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are execution costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that execution. You can review costs in the execution logs." },
|
||||
{ question: "What happens when a block fails during execution?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the execution with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-execute from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-executes." },
|
||||
{ question: "Can I cancel a running workflow?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel a run, the engine checks for cancellation between blocks (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the run returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based runs (API, chat, schedule, webhook) use the active snapshot, not your draft canvas. Manual runs from the editor use the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are run costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that run. You can review costs in the run logs." },
|
||||
{ question: "What happens when a block fails during a run?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the run with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-run from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-runs." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim provides comprehensive logging for all workflow executions, giving you complete visibility into how your workflows run, what data flows through them, and where issues might occur.
|
||||
Sim provides comprehensive logging for all workflow runs, giving you complete visibility into how your workflows behave, what data flows through them, and where issues might occur.
|
||||
|
||||
## Logging System
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim offers two complementary logging interfaces to match different workflows and
|
||||
|
||||
### Real-Time Console
|
||||
|
||||
During manual or chat workflow execution, logs appear in real-time in the Console panel on the right side of the workflow editor:
|
||||
During manual or chat workflow runs, logs appear in real-time in the Console panel on the right side of the workflow editor:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -27,14 +27,14 @@ During manual or chat workflow execution, logs appear in real-time in the Consol
|
||||
</div>
|
||||
|
||||
The console shows:
|
||||
- Block execution progress with active block highlighting
|
||||
- Block progress with active block highlighting
|
||||
- Real-time outputs as blocks complete
|
||||
- Execution timing for each block
|
||||
- Timing for each block
|
||||
- Success/error status indicators
|
||||
|
||||
### Logs Page
|
||||
|
||||
All workflow executions—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
All workflow runs—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -72,7 +72,7 @@ View the complete data flow for each block with tabs to switch between:
|
||||
|
||||
<Tabs items={['Output', 'Input']}>
|
||||
<Tab>
|
||||
**Output Tab** shows the block's execution result:
|
||||
**Output Tab** shows the block's result:
|
||||
- Structured data with JSON formatting
|
||||
- Markdown rendering for AI-generated content
|
||||
- Copy button for easy data extraction
|
||||
@@ -87,17 +87,17 @@ View the complete data flow for each block with tabs to switch between:
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Execution Timeline
|
||||
### Run Timeline
|
||||
|
||||
For workflow-level logs, view detailed execution metrics:
|
||||
For workflow-level logs, view detailed run metrics:
|
||||
- Start and end timestamps
|
||||
- Total workflow duration
|
||||
- Individual block execution times
|
||||
- Individual block run times
|
||||
- Performance bottleneck identification
|
||||
|
||||
## Workflow Snapshots
|
||||
|
||||
For any logged execution, click "View Snapshot" to see the exact workflow state at execution time:
|
||||
For any logged run, click "View Snapshot" to see the exact workflow state at the time of the run:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -111,12 +111,12 @@ For any logged execution, click "View Snapshot" to see the exact workflow state
|
||||
|
||||
The snapshot provides:
|
||||
- Frozen canvas showing the workflow structure
|
||||
- Block states and connections as they were during execution
|
||||
- Block states and connections as they were during the run
|
||||
- Click any block to see its inputs and outputs
|
||||
- Useful for debugging workflows that have since been modified
|
||||
|
||||
<Callout type="info">
|
||||
Workflow snapshots are only available for executions after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
Workflow snapshots are only available for runs after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
</Callout>
|
||||
|
||||
## Log Retention
|
||||
@@ -134,11 +134,11 @@ The snapshot provides:
|
||||
### For Production
|
||||
- Monitor the Logs page regularly for errors or performance issues
|
||||
- Set up filters to focus on specific workflows or time periods
|
||||
- Use live mode during critical deployments to watch executions in real-time
|
||||
- Use live mode during critical deployments to watch runs in real-time
|
||||
|
||||
### For Debugging
|
||||
- Always check the execution timeline to identify slow blocks
|
||||
- Compare inputs between working and failing executions
|
||||
- Always check the run timeline to identify slow blocks
|
||||
- Compare inputs between working and failing runs
|
||||
- Use workflow snapshots to see the exact state when issues occurred
|
||||
|
||||
## Next Steps
|
||||
@@ -150,10 +150,10 @@ The snapshot provides:
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How long are execution logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each execution log?", answer: "Each log entry includes the execution ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), execution data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "How long are run logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each run log?", answer: "Each log entry includes the run ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), run data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "Are API keys visible in the logs?", answer: "No. API keys and credentials are automatically redacted in the log input tab for security. You can safely inspect block inputs without exposing sensitive values." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at execution time. It lets you see the exact state of the workflow when a particular execution ran, which is useful for debugging workflows that have been modified since the execution." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when executions complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new execution entries appear as they are logged, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at the time of a run. It lets you see the exact state of the workflow when a particular run happened, which is useful for debugging workflows that have been modified since." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when runs complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new log entries appear as they are recorded, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
]} />
|
||||
@@ -251,7 +251,7 @@ Update a Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `issueKey` | string | Yes | Jira issue key to update \(e.g., PROJ-123\) |
|
||||
| `summary` | string | No | New summary for the issue |
|
||||
| `description` | string | No | New description for the issue |
|
||||
| `description` | string | No | New description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | New priority ID or name for the issue \(e.g., "High"\) |
|
||||
| `assignee` | string | No | New assignee account ID for the issue |
|
||||
| `labels` | json | No | Labels to set on the issue \(array of label name strings\) |
|
||||
@@ -284,7 +284,7 @@ Create a new Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `projectId` | string | Yes | Jira project key \(e.g., PROJ\) |
|
||||
| `summary` | string | Yes | Summary for the issue |
|
||||
| `description` | string | No | Description for the issue |
|
||||
| `description` | string | No | Description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | Priority ID or name for the issue \(e.g., "10000" or "High"\) |
|
||||
| `assignee` | string | No | Assignee account ID for the issue |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. |
|
||||
|
||||
@@ -45,6 +45,7 @@ Read data from a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to read from \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to read from. Accepts "SheetName!A1:B2" for explicit ranges or just "SheetName" to read the used range of that sheet. If omitted, reads the used range of the first sheet. |
|
||||
|
||||
#### Output
|
||||
@@ -67,6 +68,7 @@ Write data to a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to write to \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to write to \(e.g., "Sheet1!A1:B2"\) |
|
||||
| `values` | array | Yes | The data to write as a 2D array \(e.g., \[\["Name", "Age"\], \["Alice", 30\]\]\) or array of objects |
|
||||
| `valueInputOption` | string | No | The format of the data to write |
|
||||
|
||||
@@ -29,7 +29,7 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
Receive external webhook payloads
|
||||
</Card>
|
||||
<Card title="Schedule" href="/triggers/schedule">
|
||||
Cron or interval based execution
|
||||
Cron or interval based runs
|
||||
</Card>
|
||||
<Card title="RSS Feed" href="/triggers/rss">
|
||||
Monitor RSS and Atom feeds for new content
|
||||
@@ -59,17 +59,17 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
|
||||
> Deployments power every trigger. Update the workflow, redeploy, and all trigger entry points pick up the new snapshot. Learn more in [Execution → Deployment Snapshots](/execution).
|
||||
|
||||
## Manual Execution Priority
|
||||
## Manual Run Priority
|
||||
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to execute based on the following priority order:
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to run based on the following priority order:
|
||||
|
||||
1. **Start Block** (highest priority)
|
||||
2. **Schedule Triggers**
|
||||
3. **External Triggers** (webhooks, integrations like Slack, Gmail, Airtable, etc.)
|
||||
|
||||
If your workflow has multiple triggers, the highest priority trigger will be executed. For example, if you have both a Start block and a Webhook trigger, clicking Run will execute the Start block.
|
||||
If your workflow has multiple triggers, the highest priority trigger will be used. For example, if you have both a Start block and a Webhook trigger, clicking Run will use the Start block.
|
||||
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are executed manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are run manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
|
||||
## Email Polling Groups
|
||||
|
||||
@@ -94,10 +94,10 @@ Invitees receive an email with a link to connect their account. Once connected,
|
||||
When configuring an email trigger, select your polling group from the credentials dropdown instead of an individual account. The system creates webhooks for each member and routes all emails through your workflow.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim executes the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim uses the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "How do I secure my webhook endpoint?", answer: "The Generic Webhook trigger supports authentication. Enable the Require Authentication toggle, set an auth token, and optionally specify a custom header name. Incoming requests must include the token as a Bearer token in the Authorization header (or in your custom header). Requests without a valid token are rejected." },
|
||||
{ question: "What happens when I test an external trigger manually?", answer: "When you click Run on a workflow with an external trigger (webhook, Slack, Gmail, etc.), Sim generates a mock payload based on the trigger's expected data structure. This lets downstream blocks resolve their variable references correctly so you can test the full workflow without waiting for a real event." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based executions (API, chat, schedule, webhook) run against the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based runs (API, chat, schedule, webhook) use the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "What integrations are available as triggers?", answer: "Sim supports a wide range of trigger integrations including GitHub (push, PR, issues), Slack, Gmail, Outlook, Linear, Jira, HubSpot, Stripe, Airtable, Calendly, Typeform, Telegram, WhatsApp, Microsoft Teams, RSS feeds, and more. Each integration provides event-specific triggers like issue_created or email_received." },
|
||||
{ question: "How does the Schedule trigger work?", answer: "The Schedule trigger runs your workflow on a timer using cron expressions or interval-based configuration. The schedule is managed within the schedule block settings. Like all triggers, scheduled runs use the active deployment snapshot, so make sure to redeploy after making workflow changes." },
|
||||
]} />
|
||||
|
||||
@@ -51,9 +51,9 @@ RSS triggers only fire for items published after you save the trigger. Existing
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How often is the RSS feed checked for new items?", answer: "The feed is polled every minute. On each poll, the service fetches the feed, compares items against the last checked timestamp and a list of previously seen GUIDs, and triggers your workflow only for genuinely new items." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow executions for the same item." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow runs for the same item." },
|
||||
{ question: "Is there a limit on how many new items are processed per poll?", answer: "Yes. Each polling cycle processes a maximum of 25 new items, sorted by publication date (newest first). If a feed publishes more than 25 items between polls, only the 25 most recent are processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered execution receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered run receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What happens if the RSS feed is temporarily unreachable?", answer: "A failed fetch increments the webhook's consecutive failure counter. After 100 consecutive failures, the RSS trigger is automatically disabled. On any successful poll, the counter resets to zero." },
|
||||
{ question: "Does the RSS trigger support Atom feeds?", answer: "Yes. The underlying parser (rss-parser) supports both RSS and Atom feed formats. You can use the URL of either format in the Feed URL field." },
|
||||
]} />
|
||||
|
||||
@@ -79,10 +79,10 @@ Schedule blocks cannot receive incoming connections and serve as workflow entry
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Do I need to deploy my workflow for the schedule to start?", answer: "Yes. Schedules are created in the database only when you deploy the workflow. Undeploying removes the schedule, and redeploying recreates it with the current configuration." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful execution." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful run." },
|
||||
{ question: "Does the schedule support timezones?", answer: "Yes. The schedule configuration includes a timezone setting. Cron expressions and simple intervals are evaluated relative to the configured timezone, which defaults to UTC if not specified." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during execution, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during a run, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "Can I have multiple schedule blocks in one workflow?", answer: "Yes. The deployment process finds all schedule blocks in the workflow and creates a separate schedule record for each one. Each schedule operates independently with its own cron expression and failure counter." },
|
||||
{ question: "What happens if the workflow is undeployed while a schedule execution is in progress?", answer: "The currently running execution will complete, but no new executions will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before executing." },
|
||||
{ question: "What happens if the workflow is undeployed while a scheduled run is in progress?", answer: "The currently running workflow will complete, but no new runs will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before running." },
|
||||
]} />
|
||||
|
||||
|
||||
@@ -19,12 +19,12 @@ The Start block is the default trigger for workflows built in Sim. It collects s
|
||||
</div>
|
||||
|
||||
<Callout type="info">
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven execution.
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven runs.
|
||||
</Callout>
|
||||
|
||||
## Fields exposed by Start
|
||||
|
||||
The Start block emits different data depending on the execution surface:
|
||||
The Start block emits different data depending on the run surface:
|
||||
|
||||
- **Input Format fields** — Every field you add becomes available as <code><start.fieldName></code>. For example, a `customerId` field shows up as <code><start.customerId></code> in downstream blocks and templates.
|
||||
- **Chat-only fields** — When the workflow runs from the chat side panel or a deployed chat experience, Sim also provides <code><start.input></code> (latest user message), <code><start.conversationId></code> (active session id), and <code><start.files></code> (chat attachments).
|
||||
@@ -33,11 +33,11 @@ Keep Input Format fields scoped to the names you expect to reference later—tho
|
||||
|
||||
## Configure the Input Format
|
||||
|
||||
Use the Input Format sub-block to define the schema that applies across execution modes:
|
||||
Use the Input Format sub-block to define the schema that applies across run modes:
|
||||
|
||||
1. Add a field for each value you want to collect.
|
||||
2. Choose a type (`string`, `number`, `boolean`, `object`, `array`, or `files`). File fields accept uploads from chat and API callers.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed executions.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed runs.
|
||||
4. Reorder fields to control how they appear in the editor form.
|
||||
|
||||
Reference structured values downstream with expressions such as <code><start.customerId></code> depending on the block you connect.
|
||||
@@ -53,7 +53,7 @@ Reference structured values downstream with expressions such as <code><start.
|
||||
tools or storage steps.
|
||||
</Tab>
|
||||
<Tab>
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the execution file upload endpoint before invoking the workflow.
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the file upload endpoint before invoking the workflow.
|
||||
|
||||
API callers can include additional optional properties. They are preserved
|
||||
inside <code><start.fieldName></code> outputs so you can experiment
|
||||
|
||||
@@ -8,7 +8,7 @@ import { Image } from '@/components/ui/image'
|
||||
import { Video } from '@/components/ui/video'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Webhooks allow external services to trigger workflow execution by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
Webhooks allow external services to trigger workflow runs by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
|
||||
## Generic Webhook Trigger
|
||||
|
||||
@@ -30,7 +30,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
2. **Configure Payload** - Set up the expected payload structure (optional)
|
||||
3. **Get Webhook URL** - Copy the automatically generated unique endpoint
|
||||
4. **External Integration** - Configure your external service to send POST requests to this URL
|
||||
5. **Workflow Execution** - Every request to the webhook URL triggers the workflow
|
||||
5. **Workflow Run** - Every request to the webhook URL triggers the workflow
|
||||
|
||||
### Features
|
||||
|
||||
@@ -38,7 +38,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
- **Automatic Parsing**: Webhook data is automatically parsed and available to subsequent blocks
|
||||
- **Authentication**: Optional bearer token or custom header authentication
|
||||
- **Rate Limiting**: Built-in protection against abuse
|
||||
- **Deduplication**: Prevents duplicate executions from repeated requests
|
||||
- **Deduplication**: Prevents duplicate runs from repeated requests
|
||||
|
||||
<Callout type="info">
|
||||
The Generic Webhook trigger fires every time the webhook URL receives a request, making it perfect for real-time integrations.
|
||||
@@ -58,7 +58,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
2. **Enable Trigger Mode** - Toggle "Use as Trigger" in the block settings
|
||||
3. **Configure Service** - Set up authentication and event filters specific to that service
|
||||
4. **Webhook Registration** - The service automatically registers the webhook with the external platform
|
||||
5. **Event-Based Execution** - Workflow triggers only for specific events from that service
|
||||
5. **Event-Based Runs** - Workflow triggers only for specific events from that service
|
||||
|
||||
### When to Use Each Approach
|
||||
|
||||
@@ -120,7 +120,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
### Testing Webhooks
|
||||
|
||||
1. Use tools like Postman or curl to test your webhook endpoints
|
||||
2. Check workflow execution logs for debugging
|
||||
2. Check workflow run logs for debugging
|
||||
3. Verify payload structure matches your expectations
|
||||
4. Test authentication and error scenarios
|
||||
|
||||
@@ -153,8 +153,8 @@ Always validate and sanitize incoming webhook data before processing it in your
|
||||
{ question: "What HTTP methods does the Generic Webhook endpoint accept?", answer: "The webhook endpoint handles POST requests for triggering workflows. GET requests are only used for provider-specific verification challenges (such as Microsoft Graph or WhatsApp verification). Other methods return a 405 Method Not Allowed response." },
|
||||
{ question: "How do I authenticate webhook requests?", answer: "Enable the Require Authentication toggle in the webhook configuration, then set an Authentication Token. Callers can send the token as a Bearer token in the Authorization header, or you can specify a custom header name (e.g., X-Secret-Key) and the token will be matched against that header instead." },
|
||||
{ question: "Can I define the expected payload structure for a webhook?", answer: "Yes. The Generic Webhook block includes an Input Format field where you can define the expected JSON schema. This is optional but helps document the expected structure. You can also use type \"file[]\" for file upload fields." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate executions from repeated requests with the same payload." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate runs from repeated requests with the same payload." },
|
||||
{ question: "What data from the webhook request is available in my workflow?", answer: "All request data including headers, body, and query parameters is parsed and made available to subsequent blocks. Common fields like event, id, and data are automatically extracted from the payload when present." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering execution. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the execution logs for error details." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering a run. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the run logs for error details." },
|
||||
]} />
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
@@ -17,11 +18,11 @@ export async function generateMetadata({
|
||||
return {
|
||||
title: `${name} — Sim Blog`,
|
||||
description: `Read articles by ${name} on the Sim blog.`,
|
||||
alternates: { canonical: `https://sim.ai/blog/authors/${id}` },
|
||||
alternates: { canonical: `${SITE_URL}/blog/authors/${id}` },
|
||||
openGraph: {
|
||||
title: `${name} — Sim Blog`,
|
||||
description: `Read articles by ${name} on the Sim blog.`,
|
||||
url: `https://sim.ai/blog/authors/${id}`,
|
||||
url: `${SITE_URL}/blog/authors/${id}`,
|
||||
siteName: 'Sim',
|
||||
type: 'profile',
|
||||
...(author?.avatarUrl
|
||||
@@ -55,25 +56,25 @@ export default async function AuthorPage({ params }: { params: Promise<{ id: str
|
||||
{
|
||||
'@type': 'Person',
|
||||
name: author.name,
|
||||
url: `https://sim.ai/blog/authors/${author.id}`,
|
||||
url: `${SITE_URL}/blog/authors/${author.id}`,
|
||||
sameAs: author.url ? [author.url] : [],
|
||||
image: author.avatarUrl,
|
||||
worksFor: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
},
|
||||
},
|
||||
{
|
||||
'@type': 'BreadcrumbList',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` },
|
||||
{
|
||||
'@type': 'ListItem',
|
||||
position: 3,
|
||||
name: author.name,
|
||||
item: `https://sim.ai/blog/authors/${author.id}`,
|
||||
item: `${SITE_URL}/blog/authors/${author.id}`,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
@@ -8,10 +9,10 @@ export default async function StudioLayout({ children }: { children: React.React
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents.',
|
||||
logo: 'https://sim.ai/logo/primary/small.png',
|
||||
logo: `${SITE_URL}/logo/primary/small.png`,
|
||||
sameAs: [
|
||||
'https://x.com/simdotai',
|
||||
'https://github.com/simstudioai/sim',
|
||||
@@ -23,7 +24,7 @@ export default async function StudioLayout({ children }: { children: React.React
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'WebSite',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
}
|
||||
|
||||
return (
|
||||
|
||||
@@ -4,6 +4,7 @@ import Link from 'next/link'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { buildCollectionPageJsonLd } from '@/lib/blog/seo'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export async function generateMetadata({
|
||||
searchParams,
|
||||
@@ -26,7 +27,7 @@ export async function generateMetadata({
|
||||
if (tag) canonicalParams.set('tag', tag)
|
||||
if (pageNum > 1) canonicalParams.set('page', String(pageNum))
|
||||
const qs = canonicalParams.toString()
|
||||
const canonical = `https://sim.ai/blog${qs ? `?${qs}` : ''}`
|
||||
const canonical = `${SITE_URL}/blog${qs ? `?${qs}` : ''}`
|
||||
|
||||
return {
|
||||
title,
|
||||
@@ -41,7 +42,7 @@ export async function generateMetadata({
|
||||
type: 'website',
|
||||
images: [
|
||||
{
|
||||
url: 'https://sim.ai/logo/primary/medium.png',
|
||||
url: `${SITE_URL}/logo/primary/medium.png`,
|
||||
width: 1200,
|
||||
height: 630,
|
||||
alt: 'Sim Blog',
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const posts = await getAllPostMeta()
|
||||
const items = posts.slice(0, 50)
|
||||
const site = 'https://sim.ai'
|
||||
const site = SITE_URL
|
||||
const lastBuildDate =
|
||||
items.length > 0 ? new Date(items[0].date).toUTCString() : new Date().toUTCString()
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const posts = await getAllPostMeta()
|
||||
const base = 'https://sim.ai'
|
||||
const base = SITE_URL
|
||||
const xml = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">
|
||||
${posts
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { getAllTags } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: 'Tags',
|
||||
description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.',
|
||||
alternates: { canonical: 'https://sim.ai/blog/tags' },
|
||||
alternates: { canonical: `${SITE_URL}/blog/tags` },
|
||||
openGraph: {
|
||||
title: 'Blog Tags | Sim',
|
||||
description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.',
|
||||
url: 'https://sim.ai/blog/tags',
|
||||
url: `${SITE_URL}/blog/tags`,
|
||||
siteName: 'Sim',
|
||||
locale: 'en_US',
|
||||
type: 'website',
|
||||
@@ -26,9 +27,9 @@ const breadcrumbJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'BreadcrumbList',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
|
||||
{ '@type': 'ListItem', position: 3, name: 'Tags', item: 'https://sim.ai/blog/tags' },
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` },
|
||||
{ '@type': 'ListItem', position: 3, name: 'Tags', item: `${SITE_URL}/blog/tags` },
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
/**
|
||||
* JSON-LD structured data for the landing page.
|
||||
*
|
||||
@@ -23,22 +25,22 @@ export default function StructuredData() {
|
||||
'@graph': [
|
||||
{
|
||||
'@type': 'Organization',
|
||||
'@id': 'https://sim.ai/#organization',
|
||||
'@id': `${SITE_URL}/#organization`,
|
||||
name: 'Sim',
|
||||
alternateName: 'Sim Studio',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
'@id': 'https://sim.ai/#logo',
|
||||
url: 'https://sim.ai/logo/b%26w/text/b%26w.svg',
|
||||
contentUrl: 'https://sim.ai/logo/b%26w/text/b%26w.svg',
|
||||
'@id': `${SITE_URL}/#logo`,
|
||||
url: `${SITE_URL}/logo/b%26w/text/b%26w.svg`,
|
||||
contentUrl: `${SITE_URL}/logo/b%26w/text/b%26w.svg`,
|
||||
width: 49.78314,
|
||||
height: 24.276,
|
||||
caption: 'Sim Logo',
|
||||
},
|
||||
image: { '@id': 'https://sim.ai/#logo' },
|
||||
image: { '@id': `${SITE_URL}/#logo` },
|
||||
sameAs: [
|
||||
'https://x.com/simdotai',
|
||||
'https://github.com/simstudioai/sim',
|
||||
@@ -53,44 +55,42 @@ export default function StructuredData() {
|
||||
},
|
||||
{
|
||||
'@type': 'WebSite',
|
||||
'@id': 'https://sim.ai/#website',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#website`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM. Join 100,000+ builders.',
|
||||
publisher: { '@id': 'https://sim.ai/#organization' },
|
||||
publisher: { '@id': `${SITE_URL}/#organization` },
|
||||
inLanguage: 'en-US',
|
||||
},
|
||||
{
|
||||
'@type': 'WebPage',
|
||||
'@id': 'https://sim.ai/#webpage',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#webpage`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
isPartOf: { '@id': 'https://sim.ai/#website' },
|
||||
about: { '@id': 'https://sim.ai/#software' },
|
||||
isPartOf: { '@id': `${SITE_URL}/#website` },
|
||||
about: { '@id': `${SITE_URL}/#software` },
|
||||
datePublished: '2024-01-01T00:00:00+00:00',
|
||||
dateModified: new Date().toISOString(),
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.',
|
||||
breadcrumb: { '@id': 'https://sim.ai/#breadcrumb' },
|
||||
breadcrumb: { '@id': `${SITE_URL}/#breadcrumb` },
|
||||
inLanguage: 'en-US',
|
||||
speakable: {
|
||||
'@type': 'SpeakableSpecification',
|
||||
cssSelector: ['#hero-heading', '[id="hero"] p'],
|
||||
},
|
||||
potentialAction: [{ '@type': 'ReadAction', target: ['https://sim.ai'] }],
|
||||
potentialAction: [{ '@type': 'ReadAction', target: [SITE_URL] }],
|
||||
},
|
||||
{
|
||||
'@type': 'BreadcrumbList',
|
||||
'@id': 'https://sim.ai/#breadcrumb',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
],
|
||||
'@id': `${SITE_URL}/#breadcrumb`,
|
||||
itemListElement: [{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }],
|
||||
},
|
||||
{
|
||||
'@type': 'WebApplication',
|
||||
'@id': 'https://sim.ai/#software',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#software`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code. Trusted by over 100,000 builders. SOC2 compliant.',
|
||||
@@ -98,7 +98,7 @@ export default function StructuredData() {
|
||||
applicationSubCategory: 'AI Workspace',
|
||||
operatingSystem: 'Web',
|
||||
browserRequirements: 'Requires a modern browser with JavaScript enabled',
|
||||
installUrl: 'https://sim.ai/signup',
|
||||
installUrl: `${SITE_URL}/signup`,
|
||||
offers: [
|
||||
{
|
||||
'@type': 'Offer',
|
||||
@@ -175,16 +175,16 @@ export default function StructuredData() {
|
||||
},
|
||||
{
|
||||
'@type': 'SoftwareSourceCode',
|
||||
'@id': 'https://sim.ai/#source',
|
||||
'@id': `${SITE_URL}/#source`,
|
||||
codeRepository: 'https://github.com/simstudioai/sim',
|
||||
programmingLanguage: ['TypeScript', 'Python'],
|
||||
runtimePlatform: 'Node.js',
|
||||
license: 'https://opensource.org/licenses/Apache-2.0',
|
||||
isPartOf: { '@id': 'https://sim.ai/#software' },
|
||||
isPartOf: { '@id': `${SITE_URL}/#software` },
|
||||
},
|
||||
{
|
||||
'@type': 'FAQPage',
|
||||
'@id': 'https://sim.ai/#faq',
|
||||
'@id': `${SITE_URL}/#faq`,
|
||||
mainEntity: [
|
||||
{
|
||||
'@type': 'Question',
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { IntegrationCtaButton } from '@/app/(landing)/integrations/[slug]/components/integration-cta-button'
|
||||
import { IntegrationFAQ } from '@/app/(landing)/integrations/[slug]/components/integration-faq'
|
||||
import { TemplateCardButton } from '@/app/(landing)/integrations/[slug]/components/template-card-button'
|
||||
@@ -14,7 +14,7 @@ import { TEMPLATES } from '@/app/workspace/[workspaceId]/home/components/templat
|
||||
|
||||
const allIntegrations = integrations as Integration[]
|
||||
const INTEGRATION_COUNT = allIntegrations.length
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
/** Fast O(1) lookups — avoids repeated linear scans inside render loops. */
|
||||
const bySlug = new Map(allIntegrations.map((i) => [i.slug, i]))
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
export default async function IntegrationsLayout({ children }: { children: React.ReactNode }) {
|
||||
const blogPosts = await getNavBlogPosts()
|
||||
const url = getBaseUrl()
|
||||
const url = SITE_URL
|
||||
const orgJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { IntegrationCard } from './components/integration-card'
|
||||
import { IntegrationGrid } from './components/integration-grid'
|
||||
import { RequestIntegrationModal } from './components/request-integration-modal'
|
||||
@@ -18,7 +18,7 @@ const INTEGRATION_COUNT = allIntegrations.length
|
||||
*/
|
||||
const TOP_NAMES = [...new Set(POPULAR_WORKFLOWS.flatMap((p) => [p.from, p.to]))].slice(0, 6)
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
/** Curated featured integrations — high-recognition services shown as cards. */
|
||||
const FEATURED_SLUGS = ['slack', 'notion', 'github', 'gmail'] as const
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono'
|
||||
import { season } from '@/app/_styles/fonts/season/season'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
manifest: '/manifest.webmanifest',
|
||||
icons: {
|
||||
icon: [{ url: '/icon.svg', type: 'image/svg+xml', sizes: 'any' }],
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import { FeaturedModelCard, ProviderIcon } from '@/app/(landing)/models/components/model-primitives'
|
||||
import {
|
||||
@@ -18,7 +18,7 @@ import {
|
||||
getRelatedModels,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
export async function generateStaticParams() {
|
||||
return ALL_CATALOG_MODELS.map((model) => ({
|
||||
@@ -221,7 +221,7 @@ export default async function ModelPage({
|
||||
|
||||
<div className='flex flex-wrap gap-2'>
|
||||
<a
|
||||
href='https://sim.ai'
|
||||
href='/'
|
||||
className='inline-flex h-[32px] items-center gap-2 rounded-[5px] border border-white bg-white px-2.5 font-season text-black text-sm transition-colors hover:border-[#E0E0E0] hover:bg-[#E0E0E0]'
|
||||
>
|
||||
Build with this model
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import {
|
||||
ChevronArrow,
|
||||
@@ -20,7 +20,7 @@ import {
|
||||
TOP_MODEL_PROVIDERS,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
export async function generateStaticParams() {
|
||||
return MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
export default async function ModelsLayout({ children }: { children: React.ReactNode }) {
|
||||
const blogPosts = await getNavBlogPosts()
|
||||
const url = getBaseUrl()
|
||||
const url = SITE_URL
|
||||
const orgJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import { ModelComparisonCharts } from '@/app/(landing)/models/components/model-comparison-charts'
|
||||
import { ModelDirectory } from '@/app/(landing)/models/components/model-directory'
|
||||
@@ -17,7 +17,7 @@ import {
|
||||
TOTAL_MODELS,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
const faqItems = [
|
||||
{
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono'
|
||||
import { season } from '@/app/_styles/fonts/season/season'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
@@ -9,7 +10,7 @@ export const metadata: Metadata = {
|
||||
title: 'Partner Program',
|
||||
description:
|
||||
"Join the Sim partner program. Build, deploy, and sell AI agent solutions powered by Sim's AI workspace. Earn your certification through Sim Academy.",
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
openGraph: {
|
||||
title: 'Partner Program | Sim',
|
||||
description: 'Join the Sim partner program.',
|
||||
|
||||
127
apps/sim/app/(landing)/seo.test.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
const SIM_ROOT = path.resolve(__dirname, '..', '..')
|
||||
const APP_DIR = path.resolve(SIM_ROOT, 'app')
|
||||
const LANDING_DIR = path.resolve(APP_DIR, '(landing)')
|
||||
|
||||
/**
|
||||
* All directories containing public-facing pages or SEO-relevant code.
|
||||
* Non-marketing app routes (workspace, chat, form) are excluded —
|
||||
* they legitimately use getBaseUrl() for dynamic, env-dependent URLs.
|
||||
*/
|
||||
const SEO_SCAN_DIRS = [
|
||||
LANDING_DIR,
|
||||
path.resolve(APP_DIR, 'changelog'),
|
||||
path.resolve(APP_DIR, 'changelog.xml'),
|
||||
path.resolve(APP_DIR, 'academy'),
|
||||
path.resolve(SIM_ROOT, 'lib', 'blog'),
|
||||
path.resolve(SIM_ROOT, 'content', 'blog'),
|
||||
]
|
||||
|
||||
const SEO_SCAN_INDIVIDUAL_FILES = [
|
||||
path.resolve(APP_DIR, 'page.tsx'),
|
||||
path.resolve(SIM_ROOT, 'ee', 'whitelabeling', 'metadata.ts'),
|
||||
]
|
||||
|
||||
function collectFiles(dir: string, exts: string[]): string[] {
|
||||
const results: string[] = []
|
||||
if (!fs.existsSync(dir)) return results
|
||||
|
||||
for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = path.join(dir, entry.name)
|
||||
if (entry.isDirectory()) {
|
||||
results.push(...collectFiles(full, exts))
|
||||
} else if (exts.some((ext) => entry.name.endsWith(ext)) && !entry.name.includes('.test.')) {
|
||||
results.push(full)
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
function getAllSeoFiles(exts: string[]): string[] {
|
||||
const files: string[] = []
|
||||
for (const dir of SEO_SCAN_DIRS) {
|
||||
files.push(...collectFiles(dir, exts))
|
||||
}
|
||||
for (const file of SEO_SCAN_INDIVIDUAL_FILES) {
|
||||
if (fs.existsSync(file)) files.push(file)
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
describe('SEO canonical URLs', () => {
|
||||
it('SITE_URL equals https://www.sim.ai', () => {
|
||||
expect(SITE_URL).toBe('https://www.sim.ai')
|
||||
})
|
||||
|
||||
it('public pages do not hardcode https://sim.ai (without www)', () => {
|
||||
const files = getAllSeoFiles(['.ts', '.tsx', '.mdx'])
|
||||
const violations: string[] = []
|
||||
|
||||
for (const file of files) {
|
||||
const content = fs.readFileSync(file, 'utf-8')
|
||||
const lines = content.split('\n')
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i]
|
||||
const hasBareSimAi =
|
||||
line.includes("'https://sim.ai'") ||
|
||||
line.includes("'https://sim.ai/") ||
|
||||
line.includes('"https://sim.ai"') ||
|
||||
line.includes('"https://sim.ai/') ||
|
||||
line.includes('`https://sim.ai/') ||
|
||||
line.includes('`https://sim.ai`') ||
|
||||
line.includes('canonical: https://sim.ai/')
|
||||
|
||||
if (!hasBareSimAi) continue
|
||||
|
||||
const isAllowlisted =
|
||||
line.includes('https://sim.ai/careers') || line.includes('https://sim.ai/discord')
|
||||
|
||||
if (isAllowlisted) continue
|
||||
|
||||
const rel = path.relative(SIM_ROOT, file)
|
||||
violations.push(`${rel}:${i + 1}: ${line.trim()}`)
|
||||
}
|
||||
}
|
||||
|
||||
expect(
|
||||
violations,
|
||||
`Found hardcoded https://sim.ai (without www):\n${violations.join('\n')}`
|
||||
).toHaveLength(0)
|
||||
})
|
||||
|
||||
it('public pages do not use getBaseUrl() for SEO metadata', () => {
|
||||
const files = getAllSeoFiles(['.ts', '.tsx'])
|
||||
const violations: string[] = []
|
||||
|
||||
for (const file of files) {
|
||||
const content = fs.readFileSync(file, 'utf-8')
|
||||
|
||||
if (!content.includes('getBaseUrl')) continue
|
||||
|
||||
const hasMetadataExport =
|
||||
content.includes('export const metadata') ||
|
||||
content.includes('export async function generateMetadata')
|
||||
const usesGetBaseUrlInMetadata =
|
||||
hasMetadataExport &&
|
||||
(content.includes('= getBaseUrl()') || content.includes('metadataBase: new URL(getBaseUrl'))
|
||||
|
||||
if (usesGetBaseUrlInMetadata) {
|
||||
const rel = path.relative(SIM_ROOT, file)
|
||||
violations.push(rel)
|
||||
}
|
||||
}
|
||||
|
||||
expect(
|
||||
violations,
|
||||
`Public pages should use SITE_URL for metadata, not getBaseUrl():\n${violations.join('\n')}`
|
||||
).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
@@ -1,6 +1,7 @@
|
||||
import type React from 'react'
|
||||
import type { Metadata } from 'next'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
// TODO: Remove notFound() call to make academy pages public once content is ready
|
||||
const ACADEMY_ENABLED = false
|
||||
@@ -12,7 +13,7 @@ export const metadata: Metadata = {
|
||||
},
|
||||
description:
|
||||
'Become a certified Sim partner — learn to build, integrate, and deploy AI workflows.',
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
openGraph: {
|
||||
title: 'Sim Academy',
|
||||
description: 'Become a certified Sim partner.',
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { validatePathSegment } from '@/lib/core/security/input-validation'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { getCredential, refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -19,6 +21,7 @@ export async function GET(request: NextRequest) {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const credentialId = searchParams.get('credentialId')
|
||||
const query = searchParams.get('query') || ''
|
||||
const driveId = searchParams.get('driveId') || undefined
|
||||
const workflowId = searchParams.get('workflowId') || undefined
|
||||
|
||||
if (!credentialId) {
|
||||
@@ -72,8 +75,21 @@ export async function GET(request: NextRequest) {
|
||||
)
|
||||
searchParams_new.append('$top', '50')
|
||||
|
||||
// When driveId is provided (SharePoint), search within that specific drive.
|
||||
// Otherwise, search the user's personal OneDrive.
|
||||
if (driveId) {
|
||||
const driveIdValidation = validatePathSegment(driveId, {
|
||||
paramName: 'driveId',
|
||||
customPattern: GRAPH_ID_PATTERN,
|
||||
})
|
||||
if (!driveIdValidation.isValid) {
|
||||
return NextResponse.json({ error: driveIdValidation.error }, { status: 400 })
|
||||
}
|
||||
}
|
||||
const drivePath = driveId ? `drives/${driveId}` : 'me/drive'
|
||||
|
||||
const response = await fetch(
|
||||
`https://graph.microsoft.com/v1.0/me/drive/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`,
|
||||
`https://graph.microsoft.com/v1.0/${drivePath}/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`,
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
|
||||
@@ -169,24 +169,24 @@ export async function DELETE(req: NextRequest) {
|
||||
const body = await req.json()
|
||||
const { chatId, resourceType, resourceId } = RemoveResourceSchema.parse(body)
|
||||
|
||||
const [chat] = await db
|
||||
.select({ resources: copilotChats.resources })
|
||||
.from(copilotChats)
|
||||
const [updated] = await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
resources: sql`COALESCE((
|
||||
SELECT jsonb_agg(elem)
|
||||
FROM jsonb_array_elements(${copilotChats.resources}) elem
|
||||
WHERE NOT (elem->>'type' = ${resourceType} AND elem->>'id' = ${resourceId})
|
||||
), '[]'::jsonb)`,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
|
||||
.limit(1)
|
||||
.returning({ resources: copilotChats.resources })
|
||||
|
||||
if (!chat) {
|
||||
if (!updated) {
|
||||
return createNotFoundResponse('Chat not found or unauthorized')
|
||||
}
|
||||
|
||||
const existing = Array.isArray(chat.resources) ? (chat.resources as ChatResource[]) : []
|
||||
const key = `${resourceType}:${resourceId}`
|
||||
const merged = existing.filter((r) => `${r.type}:${r.id}` !== key)
|
||||
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({ resources: sql`${JSON.stringify(merged)}::jsonb`, updatedAt: new Date() })
|
||||
.where(eq(copilotChats.id, chatId))
|
||||
const merged = Array.isArray(updated.resources) ? (updated.resources as ChatResource[]) : []
|
||||
|
||||
logger.info('Removed resource from chat', { chatId, resourceType, resourceId })
|
||||
|
||||
|
||||
@@ -17,6 +17,7 @@ import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { validateOAuthAccessToken } from '@/lib/auth/oauth-token'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
|
||||
import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { runHeadlessCopilotLifecycle } from '@/lib/copilot/request/lifecycle/headless'
|
||||
import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent'
|
||||
@@ -136,14 +137,14 @@ When the user refers to a workflow by name or description ("the email one", "my
|
||||
### Organization
|
||||
|
||||
- \`rename_workflow\` — rename a workflow
|
||||
- \`move_workflow\` — move a workflow into a folder (or root with null)
|
||||
- \`move_folder\` — nest a folder inside another (or root with null)
|
||||
- \`move_workflow\` — move a workflow into a folder (or back to root by clearing the folder id)
|
||||
- \`move_folder\` — nest a folder inside another (or move it back to root by clearing the parent id)
|
||||
- \`create_folder(name, parentId)\` — create nested folder hierarchies
|
||||
|
||||
### Key Rules
|
||||
|
||||
- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
|
||||
- All workflow-scoped copilot tools require \`workflowId\`.
|
||||
- Tools that operate on a specific workflow such as \`sim_workflow\`, \`sim_test\`, \`sim_deploy\`, and workflow-scoped \`sim_info\` requests require \`workflowId\`.
|
||||
- If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end.
|
||||
- Variable syntax: \`<blockname.field>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
|
||||
`
|
||||
@@ -667,10 +668,10 @@ async function handleDirectToolCall(
|
||||
}
|
||||
|
||||
/**
|
||||
* Build mode uses the main chat orchestrator with the 'fast' command instead of
|
||||
* the subagent endpoint. In Go, 'workflow' is not a registered subagent — it's a mode
|
||||
* (ModeFast) on the main chat processor that bypasses subagent orchestration and
|
||||
* executes all tools directly.
|
||||
* Build mode uses the main /api/mcp orchestrator instead of /api/subagent/workflow.
|
||||
* The main agent still delegates workflow work to the workflow subagent inside Go;
|
||||
* this helper simply uses the full headless lifecycle so build requests behave like
|
||||
* the primary MCP chat flow.
|
||||
*/
|
||||
async function handleBuildToolCall(
|
||||
args: Record<string, unknown>,
|
||||
@@ -680,6 +681,8 @@ async function handleBuildToolCall(
|
||||
try {
|
||||
const requestText = (args.request as string) || JSON.stringify(args)
|
||||
const workflowId = args.workflowId as string | undefined
|
||||
let resolvedWorkflowName: string | undefined
|
||||
let resolvedWorkspaceId: string | undefined
|
||||
|
||||
const resolved = workflowId
|
||||
? await (async () => {
|
||||
@@ -688,8 +691,10 @@ async function handleBuildToolCall(
|
||||
userId,
|
||||
action: 'read',
|
||||
})
|
||||
resolvedWorkflowName = authorization.workflow?.name || undefined
|
||||
resolvedWorkspaceId = authorization.workflow?.workspaceId || undefined
|
||||
return authorization.allowed
|
||||
? { status: 'resolved' as const, workflowId }
|
||||
? { status: 'resolved' as const, workflowId, workflowName: resolvedWorkflowName }
|
||||
: {
|
||||
status: 'not_found' as const,
|
||||
message: 'workflowId is required for build. Call create_workflow first.',
|
||||
@@ -697,6 +702,10 @@ async function handleBuildToolCall(
|
||||
})()
|
||||
: await resolveWorkflowIdForUser(userId)
|
||||
|
||||
if (resolved.status === 'resolved') {
|
||||
resolvedWorkflowName ||= resolved.workflowName
|
||||
}
|
||||
|
||||
if (!resolved || resolved.status !== 'resolved') {
|
||||
return {
|
||||
content: [
|
||||
@@ -719,10 +728,29 @@ async function handleBuildToolCall(
|
||||
}
|
||||
|
||||
const chatId = generateId()
|
||||
const executionContext = await prepareExecutionContext(userId, resolved.workflowId, chatId, {
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
})
|
||||
resolvedWorkspaceId = executionContext.workspaceId
|
||||
let workspaceContext: string | undefined
|
||||
if (resolvedWorkspaceId) {
|
||||
try {
|
||||
workspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to generate workspace context for build tool call', {
|
||||
workflowId: resolved.workflowId,
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const requestPayload = {
|
||||
message: requestText,
|
||||
workflowId: resolved.workflowId,
|
||||
...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}),
|
||||
...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}),
|
||||
...(workspaceContext ? { workspaceContext } : {}),
|
||||
userId,
|
||||
model: DEFAULT_COPILOT_MODEL,
|
||||
mode: 'agent',
|
||||
@@ -734,8 +762,10 @@ async function handleBuildToolCall(
|
||||
const result = await runHeadlessCopilotLifecycle(requestPayload, {
|
||||
userId,
|
||||
workflowId: resolved.workflowId,
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
chatId,
|
||||
goRoute: '/api/mcp',
|
||||
executionContext,
|
||||
autoExecuteTools: true,
|
||||
timeout: ORCHESTRATION_TIMEOUT_MS,
|
||||
interactive: false,
|
||||
|
||||
135
apps/sim/app/api/tools/microsoft_excel/drives/route.ts
Normal file
@@ -0,0 +1,135 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { validatePathSegment, validateSharePointSiteId } from '@/lib/core/security/input-validation'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
const logger = createLogger('MicrosoftExcelDrivesAPI')
|
||||
|
||||
interface GraphDrive {
|
||||
id: string
|
||||
name: string
|
||||
driveType: string
|
||||
webUrl?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* List document libraries (drives) for a SharePoint site.
|
||||
* Used by the microsoft.excel.drives selector to let users pick
|
||||
* which drive contains their Excel file.
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
|
||||
try {
|
||||
const body = await request.json()
|
||||
const { credential, workflowId, siteId, driveId } = body
|
||||
|
||||
if (!credential) {
|
||||
logger.warn(`[${requestId}] Missing credential in request`)
|
||||
return NextResponse.json({ error: 'Credential is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
if (!siteId) {
|
||||
logger.warn(`[${requestId}] Missing siteId in request`)
|
||||
return NextResponse.json({ error: 'Site ID is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const siteIdValidation = validateSharePointSiteId(siteId, 'siteId')
|
||||
if (!siteIdValidation.isValid) {
|
||||
logger.warn(`[${requestId}] Invalid siteId format`)
|
||||
return NextResponse.json({ error: siteIdValidation.error }, { status: 400 })
|
||||
}
|
||||
|
||||
const authz = await authorizeCredentialUse(request, {
|
||||
credentialId: credential,
|
||||
workflowId,
|
||||
})
|
||||
if (!authz.ok || !authz.credentialOwnerUserId) {
|
||||
return NextResponse.json({ error: authz.error || 'Unauthorized' }, { status: 403 })
|
||||
}
|
||||
|
||||
const accessToken = await refreshAccessTokenIfNeeded(
|
||||
credential,
|
||||
authz.credentialOwnerUserId,
|
||||
requestId
|
||||
)
|
||||
if (!accessToken) {
|
||||
logger.warn(`[${requestId}] Failed to obtain valid access token`)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to obtain valid access token', authRequired: true },
|
||||
{ status: 401 }
|
||||
)
|
||||
}
|
||||
|
||||
// Single-drive lookup when driveId is provided (used by fetchById)
|
||||
if (driveId) {
|
||||
const driveIdValidation = validatePathSegment(driveId, {
|
||||
paramName: 'driveId',
|
||||
customPattern: GRAPH_ID_PATTERN,
|
||||
})
|
||||
if (!driveIdValidation.isValid) {
|
||||
return NextResponse.json({ error: driveIdValidation.error }, { status: 400 })
|
||||
}
|
||||
|
||||
const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives/${driveId}?$select=id,name,driveType,webUrl`
|
||||
const response = await fetch(url, {
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response
|
||||
.json()
|
||||
.catch(() => ({ error: { message: 'Unknown error' } }))
|
||||
return NextResponse.json(
|
||||
{ error: errorData.error?.message || 'Failed to fetch drive' },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data: GraphDrive = await response.json()
|
||||
return NextResponse.json(
|
||||
{ drive: { id: data.id, name: data.name, driveType: data.driveType } },
|
||||
{ status: 200 }
|
||||
)
|
||||
}
|
||||
|
||||
// List all drives for the site
|
||||
const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives?$select=id,name,driveType,webUrl`
|
||||
|
||||
const response = await fetch(url, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
},
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({ error: { message: 'Unknown error' } }))
|
||||
logger.error(`[${requestId}] Microsoft Graph API error fetching drives`, {
|
||||
status: response.status,
|
||||
error: errorData.error?.message,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: errorData.error?.message || 'Failed to fetch drives' },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
const drives = (data.value || []).map((drive: GraphDrive) => ({
|
||||
id: drive.id,
|
||||
name: drive.name,
|
||||
driveType: drive.driveType,
|
||||
}))
|
||||
|
||||
logger.info(`[${requestId}] Successfully fetched ${drives.length} drives for site ${siteId}`)
|
||||
return NextResponse.json({ drives }, { status: 200 })
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error fetching drives`, error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { getItemBasePath } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -30,6 +31,7 @@ export async function GET(request: NextRequest) {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const credentialId = searchParams.get('credentialId')
|
||||
const spreadsheetId = searchParams.get('spreadsheetId')
|
||||
const driveId = searchParams.get('driveId') || undefined
|
||||
const workflowId = searchParams.get('workflowId') || undefined
|
||||
|
||||
if (!credentialId) {
|
||||
@@ -61,17 +63,23 @@ export async function GET(request: NextRequest) {
|
||||
`[${requestId}] Fetching worksheets from Microsoft Graph API for workbook ${spreadsheetId}`
|
||||
)
|
||||
|
||||
// Fetch worksheets from Microsoft Graph API
|
||||
const worksheetsResponse = await fetch(
|
||||
`https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}
|
||||
)
|
||||
let basePath: string
|
||||
try {
|
||||
basePath = getItemBasePath(spreadsheetId, driveId)
|
||||
} catch (error) {
|
||||
return NextResponse.json(
|
||||
{ error: error instanceof Error ? error.message : 'Invalid parameters' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const worksheetsResponse = await fetch(`${basePath}/workbook/worksheets`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
|
||||
if (!worksheetsResponse.ok) {
|
||||
const errorData = await worksheetsResponse
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
* @vitest-environment node
|
||||
*/
|
||||
|
||||
import { databaseMock } from '@sim/testing'
|
||||
import { NextRequest } from 'next/server'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
@@ -203,4 +204,73 @@ describe('POST /api/workflows/[id]/executions/[executionId]/cancel', () => {
|
||||
|
||||
expect(response.status).toBe(403)
|
||||
})
|
||||
|
||||
it('updates execution log status in DB when durably recorded', async () => {
|
||||
const mockWhere = vi.fn().mockResolvedValue(undefined)
|
||||
const mockSet = vi.fn(() => ({ where: mockWhere }))
|
||||
databaseMock.db.update.mockReturnValueOnce({ set: mockSet })
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: true,
|
||||
reason: 'recorded',
|
||||
})
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).toHaveBeenCalled()
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
status: 'cancelled',
|
||||
endedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('updates execution log status in DB when locally aborted', async () => {
|
||||
const mockWhere = vi.fn().mockResolvedValue(undefined)
|
||||
const mockSet = vi.fn(() => ({ where: mockWhere }))
|
||||
databaseMock.db.update.mockReturnValueOnce({ set: mockSet })
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: false,
|
||||
reason: 'redis_unavailable',
|
||||
})
|
||||
mockAbortManualExecution.mockReturnValue(true)
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).toHaveBeenCalled()
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
status: 'cancelled',
|
||||
endedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('does not update execution log status in DB when only paused execution was cancelled', async () => {
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: false,
|
||||
reason: 'redis_unavailable',
|
||||
})
|
||||
mockCancelPausedExecution.mockResolvedValue(true)
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('returns success even if direct DB update fails', async () => {
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: true,
|
||||
reason: 'recorded',
|
||||
})
|
||||
databaseMock.db.update.mockReturnValueOnce({
|
||||
set: vi.fn(() => ({
|
||||
where: vi.fn(() => {
|
||||
throw new Error('DB connection failed')
|
||||
}),
|
||||
})),
|
||||
})
|
||||
|
||||
const response = await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const data = await response.json()
|
||||
expect(data.success).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import { db } from '@sim/db'
|
||||
import { workflowExecutionLogs } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { checkHybridAuth } from '@/lib/auth/hybrid'
|
||||
import { markExecutionCancelled } from '@/lib/execution/cancellation'
|
||||
@@ -83,6 +86,25 @@ export async function POST(
|
||||
})
|
||||
}
|
||||
|
||||
if ((cancellation.durablyRecorded || locallyAborted) && !pausedCancelled) {
|
||||
try {
|
||||
await db
|
||||
.update(workflowExecutionLogs)
|
||||
.set({ status: 'cancelled', endedAt: new Date() })
|
||||
.where(
|
||||
and(
|
||||
eq(workflowExecutionLogs.executionId, executionId),
|
||||
eq(workflowExecutionLogs.status, 'running')
|
||||
)
|
||||
)
|
||||
} catch (dbError) {
|
||||
logger.warn('Failed to update execution log status directly', {
|
||||
executionId,
|
||||
error: dbError,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const success = cancellation.durablyRecorded || locallyAborted || pausedCancelled
|
||||
|
||||
if (success) {
|
||||
|
||||
@@ -48,14 +48,11 @@ export async function GET(
|
||||
|
||||
const meta = await getExecutionMeta(executionId)
|
||||
if (!meta) {
|
||||
return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 })
|
||||
return NextResponse.json({ error: 'Run buffer not found or expired' }, { status: 404 })
|
||||
}
|
||||
|
||||
if (meta.workflowId && meta.workflowId !== workflowId) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Execution does not belong to this workflow' },
|
||||
{ status: 403 }
|
||||
)
|
||||
return NextResponse.json({ error: 'Run does not belong to this workflow' }, { status: 403 })
|
||||
}
|
||||
|
||||
const fromParam = req.nextUrl.searchParams.get('from')
|
||||
|
||||
@@ -95,7 +95,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(resultWithOutput as ExecutionResult)
|
||||
|
||||
if (result.success === false) {
|
||||
const message = result.error || 'Workflow execution failed'
|
||||
const message = result.error || 'Workflow run failed'
|
||||
await loggingSession.safeCompleteWithError({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: totalDuration || result.metadata?.duration || 0,
|
||||
@@ -112,7 +112,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
}
|
||||
|
||||
return createSuccessResponse({
|
||||
message: 'Execution logs persisted successfully',
|
||||
message: 'Run logs persisted successfully',
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const dynamic = 'force-static'
|
||||
export const revalidate = 3600
|
||||
@@ -48,7 +49,7 @@ export async function GET() {
|
||||
<rss version="2.0">
|
||||
<channel>
|
||||
<title>Sim Changelog</title>
|
||||
<link>https://sim.ai/changelog</link>
|
||||
<link>${SITE_URL}/changelog</link>
|
||||
<description>Latest changes, fixes and updates in Sim.</description>
|
||||
<language>en-us</language>
|
||||
${items}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import ChangelogContent from '@/app/changelog/components/changelog-content'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: 'Changelog',
|
||||
description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.',
|
||||
alternates: { canonical: `${SITE_URL}/changelog` },
|
||||
openGraph: {
|
||||
title: 'Changelog',
|
||||
description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.',
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
import React, { type HTMLAttributes, memo, type ReactNode, useMemo } from 'react'
|
||||
import React, { type HTMLAttributes, memo, type ReactNode } from 'react'
|
||||
import { Streamdown } from 'streamdown'
|
||||
import 'streamdown/styles.css'
|
||||
import { CopyCodeButton, Tooltip } from '@/components/emcn'
|
||||
import { extractTextContent } from '@/lib/core/utils/react-node-text'
|
||||
|
||||
export function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) {
|
||||
function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) {
|
||||
return (
|
||||
<Tooltip.Root delayDuration={300}>
|
||||
<Tooltip.Trigger asChild>
|
||||
@@ -24,175 +24,151 @@ export function LinkWithPreview({ href, children }: { href: string; children: Re
|
||||
)
|
||||
}
|
||||
|
||||
function createCustomComponents(LinkComponent: typeof LinkWithPreview) {
|
||||
return {
|
||||
p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
|
||||
<p className='mb-1 font-sans text-base text-gray-800 leading-relaxed last:mb-0 dark:text-gray-200'>
|
||||
{children}
|
||||
</p>
|
||||
),
|
||||
const COMPONENTS = {
|
||||
p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
|
||||
<p className='mb-1 font-sans text-base text-gray-800 leading-relaxed last:mb-0 dark:text-gray-200'>
|
||||
{children}
|
||||
</p>
|
||||
),
|
||||
|
||||
h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h1 className='mt-10 mb-5 font-sans font-semibold text-2xl text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h1>
|
||||
),
|
||||
h2: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h2 className='mt-8 mb-4 font-sans font-semibold text-gray-900 text-xl dark:text-gray-100'>
|
||||
{children}
|
||||
</h2>
|
||||
),
|
||||
h3: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h3 className='mt-7 mb-3 font-sans font-semibold text-gray-900 text-lg dark:text-gray-100'>
|
||||
{children}
|
||||
</h3>
|
||||
),
|
||||
h4: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h4 className='mt-5 mb-2 font-sans font-semibold text-base text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h4>
|
||||
),
|
||||
h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h1 className='mt-10 mb-5 font-sans font-semibold text-2xl text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h1>
|
||||
),
|
||||
h2: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h2 className='mt-8 mb-4 font-sans font-semibold text-gray-900 text-xl dark:text-gray-100'>
|
||||
{children}
|
||||
</h2>
|
||||
),
|
||||
h3: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h3 className='mt-7 mb-3 font-sans font-semibold text-gray-900 text-lg dark:text-gray-100'>
|
||||
{children}
|
||||
</h3>
|
||||
),
|
||||
h4: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h4 className='mt-5 mb-2 font-sans font-semibold text-base text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h4>
|
||||
),
|
||||
|
||||
ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
|
||||
<ul
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'disc' }}
|
||||
>
|
||||
{children}
|
||||
</ul>
|
||||
),
|
||||
ol: ({ children }: React.HTMLAttributes<HTMLOListElement>) => (
|
||||
<ol
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'decimal' }}
|
||||
>
|
||||
{children}
|
||||
</ol>
|
||||
),
|
||||
li: ({ children }: React.LiHTMLAttributes<HTMLLIElement>) => (
|
||||
<li className='font-sans text-gray-800 dark:text-gray-200' style={{ display: 'list-item' }}>
|
||||
{children}
|
||||
</li>
|
||||
),
|
||||
ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
|
||||
<ul
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'disc' }}
|
||||
>
|
||||
{children}
|
||||
</ul>
|
||||
),
|
||||
ol: ({ children }: React.HTMLAttributes<HTMLOListElement>) => (
|
||||
<ol
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'decimal' }}
|
||||
>
|
||||
{children}
|
||||
</ol>
|
||||
),
|
||||
li: ({ children }: React.LiHTMLAttributes<HTMLLIElement>) => (
|
||||
<li className='font-sans text-gray-800 dark:text-gray-200' style={{ display: 'list-item' }}>
|
||||
{children}
|
||||
</li>
|
||||
),
|
||||
|
||||
pre: ({ children }: HTMLAttributes<HTMLPreElement>) => {
|
||||
let codeProps: HTMLAttributes<HTMLElement> = {}
|
||||
let codeContent: ReactNode = children
|
||||
pre: ({ children }: HTMLAttributes<HTMLPreElement>) => {
|
||||
let codeProps: HTMLAttributes<HTMLElement> = {}
|
||||
let codeContent: ReactNode = children
|
||||
|
||||
if (
|
||||
React.isValidElement<{ className?: string; children?: ReactNode }>(children) &&
|
||||
children.type === 'code'
|
||||
) {
|
||||
const childElement = children as React.ReactElement<{
|
||||
className?: string
|
||||
children?: ReactNode
|
||||
}>
|
||||
codeProps = { className: childElement.props.className }
|
||||
codeContent = childElement.props.children
|
||||
}
|
||||
if (
|
||||
React.isValidElement<{ className?: string; children?: ReactNode }>(children) &&
|
||||
children.type === 'code'
|
||||
) {
|
||||
const childElement = children as React.ReactElement<{
|
||||
className?: string
|
||||
children?: ReactNode
|
||||
}>
|
||||
codeProps = { className: childElement.props.className }
|
||||
codeContent = childElement.props.children
|
||||
}
|
||||
|
||||
return (
|
||||
<div className='my-6 rounded-md bg-gray-900 text-sm dark:bg-black'>
|
||||
<div className='flex items-center justify-between border-gray-700 border-b px-4 py-1.5 dark:border-gray-800'>
|
||||
<span className='font-sans text-gray-400 text-xs'>
|
||||
{codeProps.className?.replace('language-', '') || 'code'}
|
||||
</span>
|
||||
<CopyCodeButton
|
||||
code={extractTextContent(codeContent)}
|
||||
className='text-gray-400 hover-hover:bg-gray-700 hover-hover:text-gray-200'
|
||||
/>
|
||||
</div>
|
||||
<pre className='overflow-x-auto p-4 font-mono text-gray-200 dark:text-gray-100'>
|
||||
{codeContent}
|
||||
</pre>
|
||||
return (
|
||||
<div className='my-6 rounded-md bg-gray-900 text-sm dark:bg-black'>
|
||||
<div className='flex items-center justify-between border-gray-700 border-b px-4 py-1.5 dark:border-gray-800'>
|
||||
<span className='font-sans text-gray-400 text-xs'>
|
||||
{codeProps.className?.replace('language-', '') || 'code'}
|
||||
</span>
|
||||
<CopyCodeButton
|
||||
code={extractTextContent(codeContent)}
|
||||
className='text-gray-400 hover-hover:bg-gray-700 hover-hover:text-gray-200'
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
|
||||
inlineCode: ({ children }: { children?: React.ReactNode }) => (
|
||||
<code className='rounded bg-gray-200 px-1 py-0.5 font-mono text-gray-800 text-inherit dark:bg-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</code>
|
||||
),
|
||||
|
||||
blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
|
||||
<blockquote className='my-4 border-gray-300 border-l-4 py-1 pl-4 font-sans text-gray-700 italic dark:border-gray-600 dark:text-gray-300'>
|
||||
{children}
|
||||
</blockquote>
|
||||
),
|
||||
|
||||
hr: () => <hr className='my-8 border-gray-500/[.07] border-t dark:border-gray-400/[.07]' />,
|
||||
|
||||
a: ({ href, children, ...props }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
|
||||
<LinkComponent href={href || '#'} {...props}>
|
||||
{children}
|
||||
</LinkComponent>
|
||||
),
|
||||
|
||||
table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
|
||||
<div className='my-4 w-full overflow-x-auto'>
|
||||
<table className='min-w-full table-auto border border-gray-300 font-sans text-sm dark:border-gray-700'>
|
||||
{children}
|
||||
</table>
|
||||
<pre className='overflow-x-auto p-4 font-mono text-gray-200 dark:text-gray-100'>
|
||||
{codeContent}
|
||||
</pre>
|
||||
</div>
|
||||
),
|
||||
thead: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<thead className='bg-gray-100 text-left dark:bg-gray-800'>{children}</thead>
|
||||
),
|
||||
tbody: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<tbody className='divide-y divide-gray-200 bg-white dark:divide-gray-700 dark:bg-gray-900'>
|
||||
{children}
|
||||
</tbody>
|
||||
),
|
||||
tr: ({ children }: React.HTMLAttributes<HTMLTableRowElement>) => (
|
||||
<tr className='border-gray-200 border-b transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-gray-800/60'>
|
||||
{children}
|
||||
</tr>
|
||||
),
|
||||
th: ({ children }: React.ThHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<th className='border-gray-300 border-r px-4 py-2 font-medium text-gray-700 last:border-r-0 dark:border-gray-700 dark:text-gray-300'>
|
||||
{children}
|
||||
</th>
|
||||
),
|
||||
td: ({ children }: React.TdHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<td className='break-words border-gray-300 border-r px-4 py-2 text-gray-800 last:border-r-0 dark:border-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</td>
|
||||
),
|
||||
)
|
||||
},
|
||||
|
||||
img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
|
||||
<img
|
||||
src={src}
|
||||
alt={alt || 'Image'}
|
||||
className='my-3 h-auto max-w-full rounded-md'
|
||||
{...props}
|
||||
/>
|
||||
),
|
||||
}
|
||||
inlineCode: ({ children }: { children?: React.ReactNode }) => (
|
||||
<code className='rounded bg-gray-200 px-1 py-0.5 font-mono text-gray-800 text-inherit dark:bg-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</code>
|
||||
),
|
||||
|
||||
blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
|
||||
<blockquote className='my-4 border-gray-300 border-l-4 py-1 pl-4 font-sans text-gray-700 italic dark:border-gray-600 dark:text-gray-300'>
|
||||
{children}
|
||||
</blockquote>
|
||||
),
|
||||
|
||||
hr: () => <hr className='my-8 border-gray-500/[.07] border-t dark:border-gray-400/[.07]' />,
|
||||
|
||||
a: ({ href, children, ...props }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
|
||||
<LinkWithPreview href={href || '#'} {...props}>
|
||||
{children}
|
||||
</LinkWithPreview>
|
||||
),
|
||||
|
||||
table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
|
||||
<div className='my-4 w-full overflow-x-auto'>
|
||||
<table className='min-w-full table-auto border border-gray-300 font-sans text-sm dark:border-gray-700'>
|
||||
{children}
|
||||
</table>
|
||||
</div>
|
||||
),
|
||||
thead: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<thead className='bg-gray-100 text-left dark:bg-gray-800'>{children}</thead>
|
||||
),
|
||||
tbody: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<tbody className='divide-y divide-gray-200 bg-white dark:divide-gray-700 dark:bg-gray-900'>
|
||||
{children}
|
||||
</tbody>
|
||||
),
|
||||
tr: ({ children }: React.HTMLAttributes<HTMLTableRowElement>) => (
|
||||
<tr className='border-gray-200 border-b transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-gray-800/60'>
|
||||
{children}
|
||||
</tr>
|
||||
),
|
||||
th: ({ children }: React.ThHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<th className='border-gray-300 border-r px-4 py-2 font-medium text-gray-700 last:border-r-0 dark:border-gray-700 dark:text-gray-300'>
|
||||
{children}
|
||||
</th>
|
||||
),
|
||||
td: ({ children }: React.TdHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<td className='break-words border-gray-300 border-r px-4 py-2 text-gray-800 last:border-r-0 dark:border-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</td>
|
||||
),
|
||||
|
||||
img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
|
||||
<img src={src} alt={alt || 'Image'} className='my-3 h-auto max-w-full rounded-md' {...props} />
|
||||
),
|
||||
}
|
||||
|
||||
const DEFAULT_COMPONENTS = createCustomComponents(LinkWithPreview)
|
||||
|
||||
const MarkdownRenderer = memo(function MarkdownRenderer({
|
||||
content,
|
||||
customLinkComponent,
|
||||
}: {
|
||||
content: string
|
||||
customLinkComponent?: typeof LinkWithPreview
|
||||
}) {
|
||||
const components = useMemo(() => {
|
||||
if (!customLinkComponent) {
|
||||
return DEFAULT_COMPONENTS
|
||||
}
|
||||
return createCustomComponents(customLinkComponent)
|
||||
}, [customLinkComponent])
|
||||
|
||||
const processedContent = content.trim()
|
||||
|
||||
const MarkdownRenderer = memo(function MarkdownRenderer({ content }: { content: string }) {
|
||||
return (
|
||||
<div className='space-y-4 break-words font-sans text-[var(--landing-text)] text-base leading-relaxed'>
|
||||
<Streamdown mode='static' components={components}>
|
||||
{processedContent}
|
||||
<Streamdown mode='static' components={COMPONENTS}>
|
||||
{content.trim()}
|
||||
</Streamdown>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -8,7 +8,6 @@ import {
|
||||
ChatFileDownloadAll,
|
||||
} from '@/app/chat/components/message/components/file-download'
|
||||
import MarkdownRenderer from '@/app/chat/components/message/components/markdown-renderer'
|
||||
import { useThrottledValue } from '@/hooks/use-throttled-value'
|
||||
|
||||
export interface ChatAttachment {
|
||||
id: string
|
||||
@@ -39,11 +38,6 @@ export interface ChatMessage {
|
||||
files?: ChatFile[]
|
||||
}
|
||||
|
||||
function EnhancedMarkdownRenderer({ content }: { content: string }) {
|
||||
const throttled = useThrottledValue(content)
|
||||
return <MarkdownRenderer content={throttled} />
|
||||
}
|
||||
|
||||
export const ClientChatMessage = memo(
|
||||
function ClientChatMessage({ message }: { message: ChatMessage }) {
|
||||
const [isCopied, setIsCopied] = useState(false)
|
||||
@@ -188,7 +182,7 @@ export const ClientChatMessage = memo(
|
||||
{JSON.stringify(cleanTextContent, null, 2)}
|
||||
</pre>
|
||||
) : (
|
||||
<EnhancedMarkdownRenderer content={cleanTextContent as string} />
|
||||
<MarkdownRenderer content={cleanTextContent as string} />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -6,7 +6,7 @@ export const CHAT_ERROR_MESSAGES = {
|
||||
AUTH_REQUIRED_EMAIL: 'Please provide your email to access this chat.',
|
||||
CHAT_UNAVAILABLE: 'This chat is currently unavailable. Please try again later.',
|
||||
NO_CHAT_TRIGGER:
|
||||
'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat execution.',
|
||||
'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat.',
|
||||
USAGE_LIMIT_EXCEEDED: 'Usage limit exceeded. Please upgrade your plan to continue using chat.',
|
||||
} as const
|
||||
|
||||
|
||||
@@ -25,10 +25,10 @@ Sim lets teams create agents visually with the workflow builder, conversationall
|
||||
|
||||
## Key Concepts
|
||||
|
||||
- **Workspace**: The AI workspace — container for agents, workflows, data sources, and executions
|
||||
- **Workspace**: The AI workspace — container for agents, workflows, data sources, and runs
|
||||
- **Workflow**: Visual builder — directed graph of blocks defining agent logic
|
||||
- **Block**: Individual step such as an LLM call, tool call, HTTP request, or code execution
|
||||
- **Trigger**: Event or schedule that initiates workflow execution
|
||||
- **Trigger**: Event or schedule that initiates a workflow run
|
||||
- **Execution**: A single run of a workflow with logs and outputs
|
||||
- **Knowledge Base**: Document store used for retrieval-augmented generation
|
||||
|
||||
@@ -41,7 +41,7 @@ Sim lets teams create agents visually with the workflow builder, conversationall
|
||||
- Knowledge bases and retrieval-augmented generation
|
||||
- Table creation and management
|
||||
- Document creation and processing
|
||||
- Scheduled and webhook-triggered executions
|
||||
- Scheduled and webhook-triggered runs
|
||||
|
||||
## Use Cases
|
||||
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Landing from '@/app/(landing)/landing'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
|
||||
export const metadata: Metadata = {
|
||||
metadataBase: new URL(baseUrl),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
title: {
|
||||
absolute: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
},
|
||||
@@ -28,7 +26,7 @@ export const metadata: Metadata = {
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code.',
|
||||
type: 'website',
|
||||
url: baseUrl,
|
||||
url: SITE_URL,
|
||||
siteName: 'Sim',
|
||||
locale: 'en_US',
|
||||
images: [
|
||||
@@ -54,10 +52,10 @@ export const metadata: Metadata = {
|
||||
},
|
||||
},
|
||||
alternates: {
|
||||
canonical: baseUrl,
|
||||
canonical: SITE_URL,
|
||||
languages: {
|
||||
'en-US': baseUrl,
|
||||
'x-default': baseUrl,
|
||||
'en-US': SITE_URL,
|
||||
'x-default': SITE_URL,
|
||||
},
|
||||
},
|
||||
robots: {
|
||||
|
||||
@@ -18,7 +18,6 @@ import {
|
||||
SpecialTags,
|
||||
} from '@/app/workspace/[workspaceId]/home/components/message-content/components/special-tags'
|
||||
import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types'
|
||||
import { useStreamingText } from '@/hooks/use-streaming-text'
|
||||
|
||||
const LANG_ALIASES: Record<string, string> = {
|
||||
js: 'javascript',
|
||||
@@ -236,7 +235,6 @@ interface ChatContentProps {
|
||||
isStreaming?: boolean
|
||||
onOptionSelect?: (id: string) => void
|
||||
onWorkspaceResourceSelect?: (resource: MothershipResource) => void
|
||||
smoothStreaming?: boolean
|
||||
}
|
||||
|
||||
export function ChatContent({
|
||||
@@ -244,20 +242,7 @@ export function ChatContent({
|
||||
isStreaming = false,
|
||||
onOptionSelect,
|
||||
onWorkspaceResourceSelect,
|
||||
smoothStreaming = true,
|
||||
}: ChatContentProps) {
|
||||
const hydratedStreamingRef = useRef(isStreaming && content.trim().length > 0)
|
||||
const previousIsStreamingRef = useRef(isStreaming)
|
||||
|
||||
useEffect(() => {
|
||||
if (!previousIsStreamingRef.current && isStreaming && content.trim().length > 0) {
|
||||
hydratedStreamingRef.current = true
|
||||
} else if (!isStreaming) {
|
||||
hydratedStreamingRef.current = false
|
||||
}
|
||||
previousIsStreamingRef.current = isStreaming
|
||||
}, [content, isStreaming])
|
||||
|
||||
const onWorkspaceResourceSelectRef = useRef(onWorkspaceResourceSelect)
|
||||
onWorkspaceResourceSelectRef.current = onWorkspaceResourceSelect
|
||||
|
||||
@@ -270,9 +255,7 @@ export function ChatContent({
|
||||
return () => window.removeEventListener('wsres-click', handler)
|
||||
}, [])
|
||||
|
||||
const rendered = useStreamingText(content, isStreaming && smoothStreaming)
|
||||
|
||||
const parsed = useMemo(() => parseSpecialTags(rendered, isStreaming), [rendered, isStreaming])
|
||||
const parsed = useMemo(() => parseSpecialTags(content, isStreaming), [content, isStreaming])
|
||||
const hasSpecialContent = parsed.hasPendingTag || parsed.segments.some((s) => s.type !== 'text')
|
||||
|
||||
if (hasSpecialContent) {
|
||||
@@ -322,7 +305,10 @@ export function ChatContent({
|
||||
key={`inline-${i}`}
|
||||
className={cn(PROSE_CLASSES, '[&>:first-child]:mt-0 [&>:last-child]:mb-0')}
|
||||
>
|
||||
<Streamdown mode='static' components={MARKDOWN_COMPONENTS}>
|
||||
<Streamdown
|
||||
mode={isStreaming ? undefined : 'static'}
|
||||
components={MARKDOWN_COMPONENTS}
|
||||
>
|
||||
{group.markdown}
|
||||
</Streamdown>
|
||||
</div>
|
||||
@@ -343,13 +329,8 @@ export function ChatContent({
|
||||
|
||||
return (
|
||||
<div className={cn(PROSE_CLASSES, '[&>:first-child]:mt-0 [&>:last-child]:mb-0')}>
|
||||
<Streamdown
|
||||
mode={isStreaming ? undefined : 'static'}
|
||||
isAnimating={isStreaming}
|
||||
animated={isStreaming && !hydratedStreamingRef.current}
|
||||
components={MARKDOWN_COMPONENTS}
|
||||
>
|
||||
{rendered}
|
||||
<Streamdown mode={isStreaming ? undefined : 'static'} components={MARKDOWN_COMPONENTS}>
|
||||
{content}
|
||||
</Streamdown>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -415,7 +415,7 @@ function OptionsDisplay({ data, onSelect }: OptionsDisplayProps) {
|
||||
if (entries.length === 0) return null
|
||||
|
||||
return (
|
||||
<div className='animate-stream-fade-in'>
|
||||
<div>
|
||||
{disabled ? (
|
||||
<button
|
||||
type='button'
|
||||
@@ -608,7 +608,7 @@ function CredentialDisplay({ data }: { data: CredentialTagData }) {
|
||||
href={data.value}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
className='flex animate-stream-fade-in items-center gap-2 rounded-lg border border-[var(--divider)] px-3 py-2.5 transition-colors hover-hover:bg-[var(--surface-5)]'
|
||||
className='flex items-center gap-2 rounded-lg border border-[var(--divider)] px-3 py-2.5 transition-colors hover-hover:bg-[var(--surface-5)]'
|
||||
>
|
||||
{createElement(Icon, { className: 'h-[16px] w-[16px] shrink-0' })}
|
||||
<span className='flex-1 font-base text-[var(--text-body)] text-sm'>
|
||||
@@ -623,7 +623,7 @@ function MothershipErrorDisplay({ data }: { data: MothershipErrorTagData }) {
|
||||
const detail = data.code ? `${data.message} (${data.code})` : data.message
|
||||
|
||||
return (
|
||||
<p className='animate-stream-fade-in font-base text-[13px] text-[var(--text-secondary)] italic leading-[20px]'>
|
||||
<p className='font-base text-[13px] text-[var(--text-secondary)] italic leading-[20px]'>
|
||||
{detail}
|
||||
</p>
|
||||
)
|
||||
@@ -635,7 +635,7 @@ function UsageUpgradeDisplay({ data }: { data: UsageUpgradeTagData }) {
|
||||
const buttonLabel = data.action === 'upgrade_plan' ? 'Upgrade Plan' : 'Increase Limit'
|
||||
|
||||
return (
|
||||
<div className='animate-stream-fade-in rounded-xl border border-amber-300/40 bg-amber-50/50 px-4 py-3 dark:border-amber-500/20 dark:bg-amber-950/20'>
|
||||
<div className='rounded-xl border border-amber-300/40 bg-amber-50/50 px-4 py-3 dark:border-amber-500/20 dark:bg-amber-950/20'>
|
||||
<div className='flex items-center gap-2'>
|
||||
<svg
|
||||
className='h-4 w-4 shrink-0 text-amber-600 dark:text-amber-400'
|
||||
|
||||
@@ -384,7 +384,6 @@ export function MessageContent({
|
||||
const hasSubagentEnded = blocks.some((b) => b.type === 'subagent_end')
|
||||
const showTrailingThinking =
|
||||
isStreaming && !hasTrailingContent && (hasSubagentEnded || allLastGroupToolsDone)
|
||||
const hasStructuredSegments = segments.some((segment) => segment.type !== 'text')
|
||||
const lastOpenSubagentGroupId = [...segments]
|
||||
.reverse()
|
||||
.find(
|
||||
@@ -404,7 +403,6 @@ export function MessageContent({
|
||||
isStreaming={isStreaming}
|
||||
onOptionSelect={onOptionSelect}
|
||||
onWorkspaceResourceSelect={onWorkspaceResourceSelect}
|
||||
smoothStreaming={!hasStructuredSegments}
|
||||
/>
|
||||
)
|
||||
case 'agent_group': {
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { ComponentType, SVGProps } from 'react'
|
||||
import {
|
||||
Asterisk,
|
||||
Blimp,
|
||||
Bug,
|
||||
Calendar,
|
||||
Database,
|
||||
Eye,
|
||||
@@ -44,6 +45,7 @@ const TOOL_ICONS: Record<string, IconComponent> = {
|
||||
create_workflow: Layout,
|
||||
edit_workflow: Pencil,
|
||||
workflow: Hammer,
|
||||
debug: Bug,
|
||||
run: PlayOutline,
|
||||
deploy: Rocket,
|
||||
auth: Integration,
|
||||
|
||||
@@ -684,12 +684,10 @@ function EmbeddedLog({ logId }: EmbeddedLogProps) {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Execution ID */}
|
||||
{/* Run ID */}
|
||||
{log.executionId && (
|
||||
<div className='flex flex-col gap-1.5 rounded-md border border-[var(--border)] bg-[var(--surface-2)] px-2.5 py-2'>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>
|
||||
Execution ID
|
||||
</span>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>Run ID</span>
|
||||
<span className='truncate font-medium text-[var(--text-secondary)] text-sm'>
|
||||
{log.executionId}
|
||||
</span>
|
||||
|
||||
@@ -180,6 +180,26 @@ export function ResourceTabs({
|
||||
return () => node.removeEventListener('wheel', handler)
|
||||
}, [])
|
||||
|
||||
useEffect(() => {
|
||||
const node = scrollNodeRef.current
|
||||
if (!node || !activeId) return
|
||||
const tab = node.querySelector<HTMLElement>(`[data-resource-tab-id="${CSS.escape(activeId)}"]`)
|
||||
if (!tab) return
|
||||
// Use bounding rects because the tab's offsetParent is a `position: relative`
|
||||
// wrapper, so `offsetLeft` is relative to that wrapper rather than `node`.
|
||||
const tabRect = tab.getBoundingClientRect()
|
||||
const nodeRect = node.getBoundingClientRect()
|
||||
const tabLeft = tabRect.left - nodeRect.left + node.scrollLeft
|
||||
const tabRight = tabLeft + tabRect.width
|
||||
const viewLeft = node.scrollLeft
|
||||
const viewRight = viewLeft + node.clientWidth
|
||||
if (tabLeft < viewLeft) {
|
||||
node.scrollTo({ left: tabLeft, behavior: 'smooth' })
|
||||
} else if (tabRight > viewRight) {
|
||||
node.scrollTo({ left: tabRight - node.clientWidth, behavior: 'smooth' })
|
||||
}
|
||||
}, [activeId])
|
||||
|
||||
const addResource = useAddChatResource(chatId)
|
||||
const removeResource = useRemoveChatResource(chatId)
|
||||
const reorderResources = useReorderChatResources(chatId)
|
||||
@@ -286,24 +306,9 @@ export function ResourceTabs({
|
||||
if (anchorIdRef.current && removedIds.has(anchorIdRef.current)) {
|
||||
anchorIdRef.current = null
|
||||
}
|
||||
// Serialize mutations so each onMutate sees the cache updated by the prior
|
||||
// one. Continue on individual failures so remaining removals still fire.
|
||||
const persistable = targets.filter((r) => !isEphemeralResource(r))
|
||||
if (persistable.length > 0) {
|
||||
void (async () => {
|
||||
for (const r of persistable) {
|
||||
try {
|
||||
await removeResource.mutateAsync({
|
||||
chatId,
|
||||
resourceType: r.type,
|
||||
resourceId: r.id,
|
||||
})
|
||||
} catch {
|
||||
// Individual failure — the mutation's onError already rolled back
|
||||
// this resource in cache. Remaining removals continue.
|
||||
}
|
||||
}
|
||||
})()
|
||||
for (const r of targets) {
|
||||
if (isEphemeralResource(r)) continue
|
||||
removeResource.mutate({ chatId, resourceType: r.type, resourceId: r.id })
|
||||
}
|
||||
},
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
|
||||
@@ -1317,7 +1317,11 @@ export function useChat(
|
||||
const persistedResources = chatHistory.resources.filter((r) => r.id !== 'streaming-file')
|
||||
if (persistedResources.length > 0) {
|
||||
setResources(persistedResources)
|
||||
setActiveResourceId(persistedResources[persistedResources.length - 1].id)
|
||||
setActiveResourceId((prev) =>
|
||||
prev && persistedResources.some((r) => r.id === prev)
|
||||
? prev
|
||||
: persistedResources[persistedResources.length - 1].id
|
||||
)
|
||||
|
||||
for (const resource of persistedResources) {
|
||||
if (resource.type !== 'workflow') continue
|
||||
@@ -2963,7 +2967,7 @@ export function useChat(
|
||||
input: {},
|
||||
output: {},
|
||||
success: false,
|
||||
error: 'Execution was cancelled',
|
||||
error: 'Run was cancelled',
|
||||
durationMs: 0,
|
||||
startedAt: now.toISOString(),
|
||||
executionOrder: Number.MAX_SAFE_INTEGER,
|
||||
@@ -2971,7 +2975,7 @@ export function useChat(
|
||||
workflowId,
|
||||
blockId: 'cancelled',
|
||||
executionId: executionId ?? undefined,
|
||||
blockName: 'Execution Cancelled',
|
||||
blockName: 'Run Cancelled',
|
||||
blockType: 'cancelled',
|
||||
})
|
||||
|
||||
|
||||
@@ -2,6 +2,7 @@ import {
|
||||
Agent,
|
||||
Auth,
|
||||
CreateWorkflow,
|
||||
Debug,
|
||||
Deploy,
|
||||
EditWorkflow,
|
||||
FunctionExecute,
|
||||
@@ -161,6 +162,7 @@ export interface ChatMessage {
|
||||
|
||||
export const SUBAGENT_LABELS: Record<string, string> = {
|
||||
workflow: 'Workflow Agent',
|
||||
debug: 'Debug Agent',
|
||||
deploy: 'Deploy Agent',
|
||||
auth: 'Auth Agent',
|
||||
research: 'Research Agent',
|
||||
@@ -200,6 +202,7 @@ export const TOOL_UI_METADATA: Record<string, ToolTitleMetadata> = {
|
||||
[CreateWorkflow.id]: { title: 'Creating workflow' },
|
||||
[EditWorkflow.id]: { title: 'Editing workflow' },
|
||||
[Workflow.id]: { title: 'Workflow Agent' },
|
||||
[Debug.id]: { title: 'Debug Agent' },
|
||||
[RUN_SUBAGENT_ID]: { title: 'Run Agent' },
|
||||
[Deploy.id]: { title: 'Deploy Agent' },
|
||||
[Auth.id]: { title: 'Auth Agent' },
|
||||
|
||||
@@ -94,7 +94,7 @@ export function ExecutionSnapshot({
|
||||
>
|
||||
<div className='flex items-center gap-2 text-[var(--text-secondary)]'>
|
||||
<Loader2 className='h-[16px] w-[16px] animate-spin' />
|
||||
<span className='text-small'>Loading execution snapshot...</span>
|
||||
<span className='text-small'>Loading run snapshot...</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
@@ -108,7 +108,7 @@ export function ExecutionSnapshot({
|
||||
>
|
||||
<div className='flex items-center gap-2 text-[var(--text-error)]'>
|
||||
<AlertCircle className='h-[16px] w-[16px]' />
|
||||
<span className='text-small'>Failed to load execution snapshot: {error.message}</span>
|
||||
<span className='text-small'>Failed to load run snapshot: {error.message}</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
@@ -122,7 +122,7 @@ export function ExecutionSnapshot({
|
||||
>
|
||||
<div className='flex items-center gap-2 text-[var(--text-secondary)]'>
|
||||
<Loader2 className='h-[16px] w-[16px] animate-spin' />
|
||||
<span className='text-small'>Loading execution snapshot...</span>
|
||||
<span className='text-small'>Loading run snapshot...</span>
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
@@ -139,8 +139,8 @@ export function ExecutionSnapshot({
|
||||
<span className='font-medium text-base'>Logged State Not Found</span>
|
||||
</div>
|
||||
<div className='max-w-md text-center text-[var(--text-secondary)] text-small'>
|
||||
This log was migrated from the old logging system. The workflow state at execution time
|
||||
is not available.
|
||||
This log was migrated from the old logging system. The workflow state at the time of
|
||||
this run is not available.
|
||||
</div>
|
||||
<div className='text-[var(--text-tertiary)] text-caption'>
|
||||
Note: {workflowState._note}
|
||||
@@ -191,7 +191,7 @@ export function ExecutionSnapshot({
|
||||
>
|
||||
<DropdownMenuItem onSelect={handleCopyExecutionId}>
|
||||
<Copy />
|
||||
Copy Execution ID
|
||||
Copy Run ID
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>,
|
||||
|
||||
@@ -448,11 +448,11 @@ export const LogDetails = memo(function LogDetails({
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Execution ID */}
|
||||
{/* Run ID */}
|
||||
{log.executionId && (
|
||||
<div className='flex flex-col gap-1.5 rounded-md border border-[var(--border)] bg-[var(--surface-2)] px-2.5 py-2'>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>
|
||||
Execution ID
|
||||
Run ID
|
||||
</span>
|
||||
<span className='truncate font-medium text-[var(--text-secondary)] text-sm'>
|
||||
{log.executionId}
|
||||
@@ -576,7 +576,7 @@ export const LogDetails = memo(function LogDetails({
|
||||
<div className='flex flex-col gap-2.5 rounded-md p-2.5'>
|
||||
<div className='flex items-center justify-between'>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>
|
||||
Base Execution:
|
||||
Base Run:
|
||||
</span>
|
||||
<span className='font-medium text-[var(--text-secondary)] text-caption'>
|
||||
{formatCost(BASE_EXECUTION_CHARGE)}
|
||||
@@ -643,8 +643,8 @@ export const LogDetails = memo(function LogDetails({
|
||||
|
||||
<div className='flex items-center justify-center rounded-md bg-[var(--surface-2)] p-2 text-center'>
|
||||
<p className='font-medium text-[var(--text-subtle)] text-xs'>
|
||||
Total cost includes a base execution charge of{' '}
|
||||
{formatCost(BASE_EXECUTION_CHARGE)} plus any model and tool usage costs.
|
||||
Total cost includes a base run charge of {formatCost(BASE_EXECUTION_CHARGE)}{' '}
|
||||
plus any model and tool usage costs.
|
||||
</p>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -77,14 +77,14 @@ export const LogRowContextMenu = memo(function LogRowContextMenu({
|
||||
<>
|
||||
<DropdownMenuItem onSelect={onCancelExecution}>
|
||||
<X />
|
||||
Cancel Execution
|
||||
Cancel Run
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuSeparator />
|
||||
</>
|
||||
)}
|
||||
<DropdownMenuItem disabled={!hasExecutionId} onSelect={onCopyExecutionId}>
|
||||
<Copy />
|
||||
Copy Execution ID
|
||||
Copy Run ID
|
||||
</DropdownMenuItem>
|
||||
<DropdownMenuItem disabled={!hasExecutionId} onSelect={onCopyLink}>
|
||||
<Link />
|
||||
|
||||
@@ -15,7 +15,7 @@ import type { PlanFeature } from '@/app/workspace/[workspaceId]/settings/compone
|
||||
export const PRO_PLAN_FEATURES: PlanFeature[] = [
|
||||
{ icon: Zap, text: '150 runs/min (sync)' },
|
||||
{ icon: Clock, text: '1,000 runs/min (async)' },
|
||||
{ icon: Timer, text: '50 min sync execution limit' },
|
||||
{ icon: Timer, text: '50 min sync run limit' },
|
||||
{ icon: HardDrive, text: '50GB file storage' },
|
||||
{ icon: Table2, text: '25 tables · 5,000 rows each' },
|
||||
]
|
||||
@@ -23,7 +23,7 @@ export const PRO_PLAN_FEATURES: PlanFeature[] = [
|
||||
export const MAX_PLAN_FEATURES: PlanFeature[] = [
|
||||
{ icon: Zap, text: '300 runs/min (sync)' },
|
||||
{ icon: Clock, text: '2,500 runs/min (async)' },
|
||||
{ icon: Timer, text: '50 min sync execution limit' },
|
||||
{ icon: Timer, text: '50 min sync run limit' },
|
||||
{ icon: HardDrive, text: '500GB file storage' },
|
||||
{ icon: Table2, text: '25 tables · 5,000 rows each' },
|
||||
]
|
||||
|
||||
@@ -449,7 +449,7 @@ export function addExecutionErrorConsoleEntry(
|
||||
const isPreExecutionError = params.isPreExecutionError ?? false
|
||||
if (!isPreExecutionError && hasBlockError) return
|
||||
|
||||
const errorMessage = params.error || 'Execution failed'
|
||||
const errorMessage = params.error || 'Run failed'
|
||||
const isTimeout = errorMessage.toLowerCase().includes('timed out')
|
||||
const timing = buildExecutionTiming(params.durationMs)
|
||||
|
||||
@@ -469,7 +469,7 @@ export function addExecutionErrorConsoleEntry(
|
||||
? 'Workflow Validation'
|
||||
: isTimeout
|
||||
? 'Timeout Error'
|
||||
: 'Execution Error',
|
||||
: 'Run Error',
|
||||
blockType: isPreExecutionError ? 'validation' : 'error',
|
||||
})
|
||||
}
|
||||
@@ -514,7 +514,7 @@ export function addHttpErrorConsoleEntry(
|
||||
workflowId: params.workflowId,
|
||||
blockId: isValidationError ? 'validation' : 'execution-error',
|
||||
executionId: params.executionId,
|
||||
blockName: isValidationError ? 'Workflow Validation' : 'Execution Error',
|
||||
blockName: isValidationError ? 'Workflow Validation' : 'Run Error',
|
||||
blockType: isValidationError ? 'validation' : 'error',
|
||||
})
|
||||
}
|
||||
@@ -537,7 +537,7 @@ export function addCancelledConsoleEntry(
|
||||
input: {},
|
||||
output: {},
|
||||
success: false,
|
||||
error: 'Execution was cancelled',
|
||||
error: 'Run was cancelled',
|
||||
durationMs: timing.durationMs,
|
||||
startedAt: timing.startedAt,
|
||||
executionOrder: Number.MAX_SAFE_INTEGER,
|
||||
@@ -545,7 +545,7 @@ export function addCancelledConsoleEntry(
|
||||
workflowId: params.workflowId,
|
||||
blockId: 'cancelled',
|
||||
executionId: params.executionId,
|
||||
blockName: 'Execution Cancelled',
|
||||
blockName: 'Run Cancelled',
|
||||
blockType: 'cancelled',
|
||||
})
|
||||
}
|
||||
@@ -652,7 +652,7 @@ export async function executeWorkflowWithFullLogging(
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json()
|
||||
const errorMessage = error.error || 'Workflow execution failed'
|
||||
const errorMessage = error.error || 'Workflow run failed'
|
||||
addHttpErrorConsoleEntry(addConsole, {
|
||||
workflowId: wfId,
|
||||
executionId,
|
||||
@@ -721,14 +721,14 @@ export async function executeWorkflowWithFullLogging(
|
||||
executionResult = {
|
||||
success: false,
|
||||
output: {},
|
||||
error: 'Execution was cancelled',
|
||||
error: 'Run was cancelled',
|
||||
logs: accumulatedBlockLogs,
|
||||
}
|
||||
},
|
||||
|
||||
onExecutionError: (data) => {
|
||||
setCurrentExecutionId(wfId, null)
|
||||
const errorMessage = data.error || 'Execution failed'
|
||||
const errorMessage = data.error || 'Run failed'
|
||||
executionResult = {
|
||||
success: false,
|
||||
output: {},
|
||||
|
||||
@@ -2,7 +2,7 @@
|
||||
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { MoreHorizontal } from 'lucide-react'
|
||||
import { MoreHorizontal, Search } from 'lucide-react'
|
||||
import {
|
||||
Button,
|
||||
ChevronDown,
|
||||
@@ -11,6 +11,7 @@ import {
|
||||
DropdownMenuGroup,
|
||||
DropdownMenuSeparator,
|
||||
DropdownMenuTrigger,
|
||||
Input,
|
||||
Modal,
|
||||
ModalBody,
|
||||
ModalContent,
|
||||
@@ -34,6 +35,9 @@ import { useSettingsNavigation } from '@/hooks/use-settings-navigation'
|
||||
|
||||
const logger = createLogger('WorkspaceHeader')
|
||||
|
||||
/** Minimum workspace count before the search input and keyboard navigation are shown. */
|
||||
const WORKSPACE_SEARCH_THRESHOLD = 3
|
||||
|
||||
interface WorkspaceHeaderProps {
|
||||
/** The active workspace object */
|
||||
activeWorkspace?: { name: string } | null
|
||||
@@ -120,6 +124,22 @@ export function WorkspaceHeader({
|
||||
const [editingWorkspaceId, setEditingWorkspaceId] = useState<string | null>(null)
|
||||
const [editingName, setEditingName] = useState('')
|
||||
const [isListRenaming, setIsListRenaming] = useState(false)
|
||||
const [workspaceSearch, setWorkspaceSearch] = useState('')
|
||||
const [highlightedIndex, setHighlightedIndex] = useState(0)
|
||||
const searchInputRef = useRef<HTMLInputElement>(null)
|
||||
const workspaceListRef = useRef<HTMLDivElement>(null)
|
||||
|
||||
useEffect(() => {
|
||||
const row = workspaceListRef.current?.querySelector<HTMLElement>(
|
||||
`[data-workspace-row-idx="${highlightedIndex}"]`
|
||||
)
|
||||
row?.scrollIntoView({ block: 'nearest' })
|
||||
}, [highlightedIndex])
|
||||
|
||||
const searchQuery = workspaceSearch.trim().toLowerCase()
|
||||
const filteredWorkspaces = searchQuery
|
||||
? workspaces.filter((w) => w.name.toLowerCase().includes(searchQuery))
|
||||
: workspaces
|
||||
|
||||
const [contextMenuPosition, setContextMenuPosition] = useState({ x: 0, y: 0 })
|
||||
const [isContextMenuOpen, setIsContextMenuOpen] = useState(false)
|
||||
@@ -173,6 +193,15 @@ export function WorkspaceHeader({
|
||||
}
|
||||
}, [isWorkspaceMenuOpen, editingWorkspaceId, editingName, workspaces, onRenameWorkspace])
|
||||
|
||||
useEffect(() => {
|
||||
if (isWorkspaceMenuOpen) {
|
||||
setHighlightedIndex(0)
|
||||
const id = requestAnimationFrame(() => searchInputRef.current?.focus())
|
||||
return () => cancelAnimationFrame(id)
|
||||
}
|
||||
setWorkspaceSearch('')
|
||||
}, [isWorkspaceMenuOpen])
|
||||
|
||||
const activeWorkspaceFull = workspaces.find((w) => w.id === workspaceId) || null
|
||||
|
||||
const workspaceInitial = (() => {
|
||||
@@ -466,10 +495,57 @@ export function WorkspaceHeader({
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<DropdownMenuGroup className='mt-1 min-h-0 flex-1'>
|
||||
<div className='flex max-h-[130px] flex-col gap-0.5 overflow-y-auto'>
|
||||
{workspaces.map((workspace) => (
|
||||
<div key={workspace.id}>
|
||||
{workspaces.length > WORKSPACE_SEARCH_THRESHOLD && (
|
||||
<div className='mt-1 flex items-center gap-1.5 rounded-md border border-[var(--border)] bg-transparent px-2 py-1 transition-colors duration-100 dark:bg-[var(--surface-4)] dark:hover-hover:border-[var(--border-1)] dark:hover-hover:bg-[var(--surface-5)]'>
|
||||
<Search
|
||||
className='h-[12px] w-[12px] flex-shrink-0 text-[var(--text-tertiary)]'
|
||||
strokeWidth={2}
|
||||
/>
|
||||
<Input
|
||||
ref={searchInputRef}
|
||||
placeholder='Search workspaces...'
|
||||
value={workspaceSearch}
|
||||
onChange={(e) => {
|
||||
setWorkspaceSearch(e.target.value)
|
||||
setHighlightedIndex(0)
|
||||
}}
|
||||
onKeyDown={(e) => {
|
||||
e.stopPropagation()
|
||||
if (filteredWorkspaces.length === 0) return
|
||||
if (e.key === 'ArrowDown') {
|
||||
e.preventDefault()
|
||||
setHighlightedIndex((i) => (i + 1) % filteredWorkspaces.length)
|
||||
} else if (e.key === 'ArrowUp') {
|
||||
e.preventDefault()
|
||||
setHighlightedIndex(
|
||||
(i) => (i - 1 + filteredWorkspaces.length) % filteredWorkspaces.length
|
||||
)
|
||||
} else if (e.key === 'Enter') {
|
||||
e.preventDefault()
|
||||
const target = filteredWorkspaces[highlightedIndex]
|
||||
if (target) onWorkspaceSwitch(target)
|
||||
}
|
||||
}}
|
||||
className='h-auto flex-1 border-0 bg-transparent p-0 text-caption leading-none placeholder:text-[var(--text-tertiary)] focus-visible:ring-0 focus-visible:ring-offset-0'
|
||||
/>
|
||||
</div>
|
||||
)}
|
||||
<DropdownMenuGroup className='mt-2 min-h-0 flex-1'>
|
||||
<div
|
||||
ref={workspaceListRef}
|
||||
className='flex max-h-[130px] flex-col gap-0.5 overflow-y-auto'
|
||||
>
|
||||
{filteredWorkspaces.length === 0 && workspaceSearch && (
|
||||
<div className='px-2 py-[5px] text-[var(--text-tertiary)] text-caption'>
|
||||
No workspaces match "{workspaceSearch}"
|
||||
</div>
|
||||
)}
|
||||
{filteredWorkspaces.map((workspace, idx) => (
|
||||
<div
|
||||
key={workspace.id}
|
||||
data-workspace-row-idx={idx}
|
||||
onMouseEnter={() => setHighlightedIndex(idx)}
|
||||
>
|
||||
{editingWorkspaceId === workspace.id ? (
|
||||
<div className='flex items-center gap-2 rounded-[5px] bg-[var(--surface-active)] px-2 py-[5px]'>
|
||||
<input
|
||||
@@ -532,9 +608,26 @@ export function WorkspaceHeader({
|
||||
'hover-hover:bg-[var(--surface-hover)]',
|
||||
(workspace.id === workspaceId ||
|
||||
menuOpenWorkspaceId === workspace.id) &&
|
||||
'bg-[var(--surface-active)]'
|
||||
'bg-[var(--surface-active)]',
|
||||
idx === highlightedIndex &&
|
||||
workspaces.length > WORKSPACE_SEARCH_THRESHOLD &&
|
||||
workspace.id !== workspaceId &&
|
||||
menuOpenWorkspaceId !== workspace.id &&
|
||||
'bg-[var(--surface-hover)]'
|
||||
)}
|
||||
onClick={() => onWorkspaceSwitch(workspace)}
|
||||
onClick={(e) => {
|
||||
if (e.metaKey || e.ctrlKey) {
|
||||
window.open(`/workspace/${workspace.id}/home`, '_blank')
|
||||
return
|
||||
}
|
||||
onWorkspaceSwitch(workspace)
|
||||
}}
|
||||
onAuxClick={(e) => {
|
||||
if (e.button === 1) {
|
||||
e.preventDefault()
|
||||
window.open(`/workspace/${workspace.id}/home`, '_blank')
|
||||
}
|
||||
}}
|
||||
onContextMenu={(e) => handleContextMenu(e, workspace)}
|
||||
>
|
||||
<span className='min-w-0 flex-1 truncate'>{workspace.name}</span>
|
||||
|
||||
@@ -128,6 +128,7 @@ export const ConfluenceBlock: BlockConfig<ConfluenceResponse> = {
|
||||
title: 'Title',
|
||||
type: 'short-input',
|
||||
placeholder: 'Enter title for the page',
|
||||
required: { field: 'operation', value: 'create' },
|
||||
condition: { field: 'operation', value: ['create', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -135,6 +136,7 @@ export const ConfluenceBlock: BlockConfig<ConfluenceResponse> = {
|
||||
title: 'Content',
|
||||
type: 'long-input',
|
||||
placeholder: 'Enter content for the page',
|
||||
required: { field: 'operation', value: 'create' },
|
||||
condition: { field: 'operation', value: ['create', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -766,6 +768,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
|
||||
title: 'Title',
|
||||
type: 'short-input',
|
||||
placeholder: 'Enter title',
|
||||
required: { field: 'operation', value: ['create', 'create_blogpost'] },
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: ['create', 'update', 'create_blogpost', 'update_blogpost', 'update_space'],
|
||||
@@ -776,6 +779,7 @@ export const ConfluenceV2Block: BlockConfig<ConfluenceResponse> = {
|
||||
title: 'Content',
|
||||
type: 'long-input',
|
||||
placeholder: 'Enter content',
|
||||
required: { field: 'operation', value: ['create', 'create_blogpost'] },
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: ['create', 'update', 'create_blogpost', 'update_blogpost'],
|
||||
|
||||
@@ -91,7 +91,7 @@ export const JiraBlock: BlockConfig<JiraResponse> = {
|
||||
placeholder: 'Select Jira project',
|
||||
dependsOn: ['credential', 'domain'],
|
||||
mode: 'basic',
|
||||
required: { field: 'operation', value: ['write', 'update', 'read-bulk'] },
|
||||
required: { field: 'operation', value: ['write', 'read-bulk'] },
|
||||
},
|
||||
// Manual project ID input (advanced mode)
|
||||
{
|
||||
@@ -102,7 +102,7 @@ export const JiraBlock: BlockConfig<JiraResponse> = {
|
||||
placeholder: 'Enter Jira project ID',
|
||||
dependsOn: ['credential', 'domain'],
|
||||
mode: 'advanced',
|
||||
required: { field: 'operation', value: ['write', 'update', 'read-bulk'] },
|
||||
required: { field: 'operation', value: ['write', 'read-bulk'] },
|
||||
},
|
||||
// Issue selector (basic mode)
|
||||
{
|
||||
@@ -218,9 +218,8 @@ export const JiraBlock: BlockConfig<JiraResponse> = {
|
||||
id: 'summary',
|
||||
title: 'New Summary',
|
||||
type: 'short-input',
|
||||
required: true,
|
||||
required: { field: 'operation', value: 'write' },
|
||||
placeholder: 'Enter new summary for the issue',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['update', 'write'] },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
@@ -240,7 +239,6 @@ Return ONLY the summary text - no explanations.`,
|
||||
title: 'New Description',
|
||||
type: 'long-input',
|
||||
placeholder: 'Enter new description for the issue',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['update', 'write'] },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
@@ -279,7 +277,6 @@ Return ONLY the description text - no explanations.`,
|
||||
title: 'Assignee Account ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Assignee account ID (e.g., 5b109f2e9729b51b54dc274d)',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -287,7 +284,6 @@ Return ONLY the description text - no explanations.`,
|
||||
title: 'Priority',
|
||||
type: 'short-input',
|
||||
placeholder: 'Priority ID or name (e.g., "10000" or "High")',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -295,7 +291,6 @@ Return ONLY the description text - no explanations.`,
|
||||
title: 'Labels',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated labels (e.g., bug, urgent)',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -303,7 +298,6 @@ Return ONLY the description text - no explanations.`,
|
||||
title: 'Due Date',
|
||||
type: 'short-input',
|
||||
placeholder: 'YYYY-MM-DD (e.g., 2024-12-31)',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
@@ -332,7 +326,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n
|
||||
title: 'Environment',
|
||||
type: 'long-input',
|
||||
placeholder: 'Environment information (e.g., Production, Staging)',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -340,7 +333,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n
|
||||
title: 'Custom Field ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'e.g., customfield_10001 or 10001',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -348,7 +340,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n
|
||||
title: 'Custom Field Value',
|
||||
type: 'short-input',
|
||||
placeholder: 'Value for the custom field',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -356,7 +347,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n
|
||||
title: 'Components',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated component names',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
@@ -364,7 +354,6 @@ Return ONLY the date string in YYYY-MM-DD format - no explanations, no quotes, n
|
||||
title: 'Fix Versions',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated fix version names',
|
||||
dependsOn: ['projectId'],
|
||||
condition: { field: 'operation', value: ['write', 'update'] },
|
||||
},
|
||||
{
|
||||
|
||||
@@ -68,6 +68,13 @@ export const MicrosoftExcelBlock: BlockConfig<MicrosoftExcelResponse> = {
|
||||
dependsOn: ['credential'],
|
||||
mode: 'basic',
|
||||
},
|
||||
{
|
||||
id: 'driveId',
|
||||
title: 'Drive ID (SharePoint)',
|
||||
type: 'short-input',
|
||||
placeholder: 'Leave empty for OneDrive, or enter drive ID for SharePoint',
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'manualSpreadsheetId',
|
||||
title: 'Spreadsheet ID',
|
||||
@@ -249,9 +256,17 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
}
|
||||
},
|
||||
params: (params) => {
|
||||
const { oauthCredential, values, spreadsheetId, tableName, worksheetName, ...rest } = params
|
||||
const {
|
||||
oauthCredential,
|
||||
values,
|
||||
spreadsheetId,
|
||||
tableName,
|
||||
worksheetName,
|
||||
driveId,
|
||||
siteId: _siteId,
|
||||
...rest
|
||||
} = params
|
||||
|
||||
// Use canonical param ID (raw subBlock IDs are deleted after serialization)
|
||||
const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : ''
|
||||
|
||||
let parsedValues
|
||||
@@ -276,6 +291,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
const baseParams = {
|
||||
...rest,
|
||||
spreadsheetId: effectiveSpreadsheetId,
|
||||
driveId: driveId ? String(driveId).trim() : undefined,
|
||||
values: parsedValues,
|
||||
oauthCredential,
|
||||
}
|
||||
@@ -302,6 +318,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
operation: { type: 'string', description: 'Operation to perform' },
|
||||
oauthCredential: { type: 'string', description: 'Microsoft Excel access token' },
|
||||
spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' },
|
||||
driveId: { type: 'string', description: 'Drive ID for SharePoint document libraries' },
|
||||
range: { type: 'string', description: 'Cell range' },
|
||||
tableName: { type: 'string', description: 'Table name' },
|
||||
worksheetName: { type: 'string', description: 'Worksheet name' },
|
||||
@@ -377,6 +394,47 @@ export const MicrosoftExcelV2Block: BlockConfig<MicrosoftExcelV2Response> = {
|
||||
placeholder: 'Enter credential ID',
|
||||
required: true,
|
||||
},
|
||||
// File Source selector (both modes)
|
||||
{
|
||||
id: 'fileSource',
|
||||
title: 'File Source',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'OneDrive', id: 'onedrive' },
|
||||
{ label: 'SharePoint', id: 'sharepoint' },
|
||||
],
|
||||
value: () => 'onedrive',
|
||||
},
|
||||
// SharePoint Site Selector (basic mode, only when SharePoint is selected)
|
||||
{
|
||||
id: 'siteSelector',
|
||||
title: 'SharePoint Site',
|
||||
type: 'file-selector',
|
||||
canonicalParamId: 'siteId',
|
||||
serviceId: 'sharepoint',
|
||||
selectorKey: 'sharepoint.sites',
|
||||
requiredScopes: [],
|
||||
placeholder: 'Select a SharePoint site',
|
||||
dependsOn: ['credential', 'fileSource'],
|
||||
condition: { field: 'fileSource', value: 'sharepoint' },
|
||||
required: { field: 'fileSource', value: 'sharepoint' },
|
||||
mode: 'basic',
|
||||
},
|
||||
// SharePoint Drive Selector (basic mode, only when SharePoint is selected)
|
||||
{
|
||||
id: 'driveSelector',
|
||||
title: 'Document Library',
|
||||
type: 'file-selector',
|
||||
canonicalParamId: 'driveId',
|
||||
serviceId: 'microsoft-excel',
|
||||
selectorKey: 'microsoft.excel.drives',
|
||||
selectorAllowSearch: false,
|
||||
placeholder: 'Select a document library',
|
||||
dependsOn: ['credential', 'siteSelector', 'fileSource'],
|
||||
condition: { field: 'fileSource', value: 'sharepoint' },
|
||||
required: { field: 'fileSource', value: 'sharepoint' },
|
||||
mode: 'basic',
|
||||
},
|
||||
// Spreadsheet Selector (basic mode)
|
||||
{
|
||||
id: 'spreadsheetId',
|
||||
@@ -388,9 +446,20 @@ export const MicrosoftExcelV2Block: BlockConfig<MicrosoftExcelV2Response> = {
|
||||
requiredScopes: [],
|
||||
mimeType: 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet',
|
||||
placeholder: 'Select a spreadsheet',
|
||||
dependsOn: ['credential'],
|
||||
dependsOn: { all: ['credential', 'fileSource'], any: ['credential', 'driveSelector'] },
|
||||
mode: 'basic',
|
||||
},
|
||||
// Drive ID for SharePoint (advanced mode, only when SharePoint is selected)
|
||||
{
|
||||
id: 'manualDriveId',
|
||||
title: 'Drive ID',
|
||||
type: 'short-input',
|
||||
canonicalParamId: 'driveId',
|
||||
placeholder: 'Enter the SharePoint drive ID',
|
||||
condition: { field: 'fileSource', value: 'sharepoint' },
|
||||
dependsOn: ['fileSource'],
|
||||
mode: 'advanced',
|
||||
},
|
||||
// Manual Spreadsheet ID (advanced mode)
|
||||
{
|
||||
id: 'manualSpreadsheetId',
|
||||
@@ -398,7 +467,7 @@ export const MicrosoftExcelV2Block: BlockConfig<MicrosoftExcelV2Response> = {
|
||||
type: 'short-input',
|
||||
canonicalParamId: 'spreadsheetId',
|
||||
placeholder: 'Enter spreadsheet ID',
|
||||
dependsOn: ['credential'],
|
||||
dependsOn: { all: ['credential'], any: ['credential', 'manualDriveId'] },
|
||||
mode: 'advanced',
|
||||
},
|
||||
// Sheet Name Selector (basic mode)
|
||||
@@ -412,7 +481,10 @@ export const MicrosoftExcelV2Block: BlockConfig<MicrosoftExcelV2Response> = {
|
||||
selectorAllowSearch: false,
|
||||
placeholder: 'Select a sheet',
|
||||
required: true,
|
||||
dependsOn: { all: ['credential'], any: ['spreadsheetId', 'manualSpreadsheetId'] },
|
||||
dependsOn: {
|
||||
all: ['credential'],
|
||||
any: ['spreadsheetId', 'manualSpreadsheetId', 'driveSelector'],
|
||||
},
|
||||
mode: 'basic',
|
||||
},
|
||||
// Manual Sheet Name (advanced mode)
|
||||
@@ -423,7 +495,10 @@ export const MicrosoftExcelV2Block: BlockConfig<MicrosoftExcelV2Response> = {
|
||||
canonicalParamId: 'sheetName',
|
||||
placeholder: 'Name of the sheet/tab (e.g., Sheet1)',
|
||||
required: true,
|
||||
dependsOn: ['credential'],
|
||||
dependsOn: {
|
||||
all: ['credential'],
|
||||
any: ['credential', 'manualDriveId'],
|
||||
},
|
||||
mode: 'advanced',
|
||||
},
|
||||
// Cell Range (optional for read/write)
|
||||
@@ -514,11 +589,20 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
fallbackToolId: 'microsoft_excel_read_v2',
|
||||
}),
|
||||
params: (params) => {
|
||||
const { oauthCredential, values, spreadsheetId, sheetName, cellRange, ...rest } = params
|
||||
const {
|
||||
oauthCredential,
|
||||
values,
|
||||
spreadsheetId,
|
||||
sheetName,
|
||||
cellRange,
|
||||
driveId,
|
||||
siteId: _siteId,
|
||||
fileSource: _fileSource,
|
||||
...rest
|
||||
} = params
|
||||
|
||||
const parsedValues = values ? JSON.parse(values as string) : undefined
|
||||
|
||||
// Use canonical param IDs (raw subBlock IDs are deleted after serialization)
|
||||
const effectiveSpreadsheetId = spreadsheetId ? String(spreadsheetId).trim() : ''
|
||||
const effectiveSheetName = sheetName ? String(sheetName).trim() : ''
|
||||
|
||||
@@ -535,6 +619,7 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
spreadsheetId: effectiveSpreadsheetId,
|
||||
sheetName: effectiveSheetName,
|
||||
cellRange: cellRange ? (cellRange as string).trim() : undefined,
|
||||
driveId: driveId ? String(driveId).trim() : undefined,
|
||||
values: parsedValues,
|
||||
oauthCredential,
|
||||
}
|
||||
@@ -543,7 +628,10 @@ Return ONLY the JSON array - no explanations, no markdown, no extra text.`,
|
||||
},
|
||||
inputs: {
|
||||
operation: { type: 'string', description: 'Operation to perform' },
|
||||
fileSource: { type: 'string', description: 'File source (onedrive or sharepoint)' },
|
||||
oauthCredential: { type: 'string', description: 'Microsoft Excel access token' },
|
||||
siteId: { type: 'string', description: 'SharePoint site ID (used for drive/file browsing)' },
|
||||
driveId: { type: 'string', description: 'Drive ID for SharePoint document libraries' },
|
||||
spreadsheetId: { type: 'string', description: 'Spreadsheet identifier (canonical param)' },
|
||||
sheetName: { type: 'string', description: 'Name of the sheet/tab (canonical param)' },
|
||||
cellRange: { type: 'string', description: 'Cell range (e.g., A1:D10)' },
|
||||
|
||||
@@ -70,7 +70,7 @@ export function WorkflowNotificationEmail({
|
||||
const message = alertReason
|
||||
? 'An alert was triggered for your workflow.'
|
||||
: isError
|
||||
? 'Your workflow execution failed.'
|
||||
? 'Your workflow run failed.'
|
||||
: 'Your workflow completed successfully.'
|
||||
|
||||
return (
|
||||
@@ -102,7 +102,7 @@ export function WorkflowNotificationEmail({
|
||||
</Section>
|
||||
|
||||
<Link href={logUrl} style={{ textDecoration: 'none' }}>
|
||||
<Text style={baseStyles.button}>View Execution Log</Text>
|
||||
<Text style={baseStyles.button}>View Run Log</Text>
|
||||
</Link>
|
||||
|
||||
{rateLimits && (rateLimits.sync || rateLimits.async) ? (
|
||||
|
||||
@@ -32,7 +32,7 @@ export function PlayOutline(props: SVGProps<SVGSVGElement>) {
|
||||
<svg
|
||||
width='24'
|
||||
height='24'
|
||||
viewBox='-1 -2 24 24'
|
||||
viewBox='0 0 24 24'
|
||||
fill='none'
|
||||
stroke='currentColor'
|
||||
strokeWidth='1.75'
|
||||
@@ -42,7 +42,7 @@ export function PlayOutline(props: SVGProps<SVGSVGElement>) {
|
||||
aria-hidden='true'
|
||||
{...props}
|
||||
>
|
||||
<path d='M7.5 3.5C7.5 2.672 8.452 2.18 9.128 2.66L18.128 9.16C18.72 9.58 18.72 10.46 18.128 10.88L9.128 17.38C8.452 17.86 7.5 17.368 7.5 16.54V3.5Z' />
|
||||
<path d='M14.26 5.39C16.17 6.48 17.67 7.33 18.73 8.11C19.81 8.89 20.6 9.71 20.89 10.79C21.09 11.58 21.09 12.42 20.89 13.21C20.6 14.29 19.81 15.11 18.73 15.89C17.67 16.67 16.17 17.52 14.26 18.61C12.42 19.65 10.87 20.53 9.69 21.04C8.51 21.54 7.42 21.8 6.37 21.5C5.6 21.28 4.89 20.86 4.33 20.29C3.56 19.51 3.25 18.44 3.1 17.15C2.96 15.87 2.96 14.19 2.96 12.06V11.94C2.96 9.81 2.96 8.13 3.1 6.85C3.25 5.56 3.56 4.49 4.33 3.71C4.89 3.14 5.6 2.72 6.37 2.5C7.42 2.2 8.51 2.46 9.69 2.96C10.87 3.47 12.42 4.35 14.26 5.39Z' />
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -12,7 +12,7 @@ ogImage: /blog/copilot/cover.png
|
||||
ogAlt: 'Sim Copilot technical overview'
|
||||
about: ['AI Assistants', 'Agentic Workflows', 'Retrieval Augmented Generation']
|
||||
timeRequired: PT7M
|
||||
canonical: https://sim.ai/blog/copilot
|
||||
canonical: https://www.sim.ai/blog/copilot
|
||||
featured: false
|
||||
draft: true
|
||||
---
|
||||
|
||||
@@ -12,7 +12,7 @@ ogImage: /blog/emcn/cover.png
|
||||
ogAlt: 'Emcn design system cover'
|
||||
about: ['Design Systems', 'Component Libraries', 'Design Tokens', 'Accessibility']
|
||||
timeRequired: PT6M
|
||||
canonical: https://sim.ai/blog/emcn
|
||||
canonical: https://www.sim.ai/blog/emcn
|
||||
featured: false
|
||||
draft: true
|
||||
---
|
||||
|
||||
@@ -12,7 +12,7 @@ ogImage: /blog/enterprise/cover.png
|
||||
ogAlt: 'Sim Enterprise features overview'
|
||||
about: ['Enterprise Software', 'Security', 'Compliance', 'Self-Hosting']
|
||||
timeRequired: PT10M
|
||||
canonical: https://sim.ai/blog/enterprise
|
||||
canonical: https://www.sim.ai/blog/enterprise
|
||||
featured: true
|
||||
draft: false
|
||||
---
|
||||
|
||||
@@ -12,7 +12,7 @@ ogImage: /blog/executor/cover.png
|
||||
ogAlt: 'Sim Executor technical overview'
|
||||
about: ['Execution', 'Workflow Orchestration']
|
||||
timeRequired: PT12M
|
||||
canonical: https://sim.ai/blog/executor
|
||||
canonical: https://www.sim.ai/blog/executor
|
||||
featured: false
|
||||
draft: false
|
||||
---
|
||||
|
||||
@@ -8,11 +8,11 @@ authors:
|
||||
- emir
|
||||
readingTime: 10
|
||||
tags: [Release, Mothership, Tables, Knowledge Base, Connectors, RAG, Sim]
|
||||
ogImage: /blog/mothership/cover.png
|
||||
ogImage: /blog/mothership/cover.jpg
|
||||
ogAlt: 'Sim v0.6 release announcement'
|
||||
about: ['AI Agents', 'Workflow Automation', 'Developer Tools']
|
||||
timeRequired: PT10M
|
||||
canonical: https://sim.ai/blog/mothership
|
||||
canonical: https://www.sim.ai/blog/mothership
|
||||
featured: true
|
||||
draft: false
|
||||
---
|
||||
|
||||
@@ -9,7 +9,7 @@ authors:
|
||||
readingTime: 12
|
||||
tags: [Multiplayer, Realtime, Collaboration, WebSockets, Architecture]
|
||||
ogImage: /blog/multiplayer/cover.png
|
||||
canonical: https://sim.ai/blog/multiplayer
|
||||
canonical: https://www.sim.ai/blog/multiplayer
|
||||
draft: false
|
||||
---
|
||||
|
||||
|
||||
@@ -9,7 +9,7 @@ authors:
|
||||
readingTime: 9
|
||||
tags: [AI Agents, Workflow Automation, OpenAI AgentKit, n8n, Sim, MCP]
|
||||
ogImage: /blog/openai-vs-n8n-vs-sim/workflow.png
|
||||
canonical: https://sim.ai/blog/openai-vs-n8n-vs-sim
|
||||
canonical: https://www.sim.ai/blog/openai-vs-n8n-vs-sim
|
||||
draft: false
|
||||
---
|
||||
|
||||
|
||||
@@ -13,7 +13,7 @@ ogImage: /blog/series-a/cover.png
|
||||
ogAlt: 'Sim team photo in front of neon logo'
|
||||
about: ['Artificial Intelligence', 'Agentic Workflows', 'Startups', 'Funding']
|
||||
timeRequired: PT4M
|
||||
canonical: https://sim.ai/blog/series-a
|
||||
canonical: https://www.sim.ai/blog/series-a
|
||||
featured: true
|
||||
draft: false
|
||||
---
|
||||
|
||||
@@ -12,7 +12,7 @@ ogImage: /blog/v0-5/cover.png
|
||||
ogAlt: 'Sim v0.5 release announcement'
|
||||
about: ['AI Agents', 'Workflow Automation', 'Developer Tools']
|
||||
timeRequired: PT8M
|
||||
canonical: https://sim.ai/blog/v0-5
|
||||
canonical: https://www.sim.ai/blog/v0-5
|
||||
featured: true
|
||||
draft: false
|
||||
---
|
||||
|
||||
@@ -1,5 +1,5 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { getBaseUrl, SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { getBrandConfig } from '@/ee/whitelabeling/branding'
|
||||
|
||||
/**
|
||||
@@ -150,7 +150,7 @@ export function generateStructuredData() {
|
||||
creator: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
},
|
||||
featureList: [
|
||||
'AI Workspace for Teams',
|
||||
|
||||
@@ -292,9 +292,9 @@ export function useCancelExecution() {
|
||||
const res = await fetch(`/api/workflows/${workflowId}/executions/${executionId}/cancel`, {
|
||||
method: 'POST',
|
||||
})
|
||||
if (!res.ok) throw new Error('Failed to cancel execution')
|
||||
if (!res.ok) throw new Error('Failed to cancel run')
|
||||
const data = await res.json()
|
||||
if (!data.success) throw new Error('Failed to cancel execution')
|
||||
if (!data.success) throw new Error('Failed to cancel run')
|
||||
return data
|
||||
},
|
||||
onMutate: async ({ executionId }) => {
|
||||
|
||||
@@ -485,21 +485,23 @@ export function useRemoveChatResource(chatId?: string) {
|
||||
onMutate: async ({ resourceType, resourceId }) => {
|
||||
if (!chatId) return
|
||||
await queryClient.cancelQueries({ queryKey: taskKeys.detail(chatId) })
|
||||
const previous = queryClient.getQueryData<TaskChatHistory>(taskKeys.detail(chatId))
|
||||
if (previous) {
|
||||
queryClient.setQueryData<TaskChatHistory>(taskKeys.detail(chatId), {
|
||||
...previous,
|
||||
resources: previous.resources.filter(
|
||||
(r) => !(r.type === resourceType && r.id === resourceId)
|
||||
),
|
||||
})
|
||||
}
|
||||
return { previous }
|
||||
const removed: TaskChatHistory['resources'] = []
|
||||
queryClient.setQueryData<TaskChatHistory>(taskKeys.detail(chatId), (prev) => {
|
||||
if (!prev) return prev
|
||||
const next: TaskChatHistory['resources'] = []
|
||||
for (const r of prev.resources) {
|
||||
if (r.type === resourceType && r.id === resourceId) removed.push(r)
|
||||
else next.push(r)
|
||||
}
|
||||
return removed.length > 0 ? { ...prev, resources: next } : prev
|
||||
})
|
||||
return { removed }
|
||||
},
|
||||
onError: (_err, _variables, context) => {
|
||||
if (context?.previous && chatId) {
|
||||
queryClient.setQueryData(taskKeys.detail(chatId), context.previous)
|
||||
}
|
||||
if (!chatId || !context?.removed.length) return
|
||||
queryClient.setQueryData<TaskChatHistory>(taskKeys.detail(chatId), (prev) =>
|
||||
prev ? { ...prev, resources: [...prev.resources, ...context.removed] } : prev
|
||||
)
|
||||
},
|
||||
onSettled: () => {
|
||||
if (chatId) {
|
||||
|
||||
@@ -1504,6 +1504,7 @@ const registry: Record<SelectorKey, SelectorDefinition> = {
|
||||
'microsoft.excel.sheets',
|
||||
context.oauthCredential ?? 'none',
|
||||
context.spreadsheetId ?? 'none',
|
||||
context.driveId ?? 'none',
|
||||
],
|
||||
enabled: ({ context }) => Boolean(context.oauthCredential && context.spreadsheetId),
|
||||
fetchList: async ({ context }: SelectorQueryArgs) => {
|
||||
@@ -1517,6 +1518,7 @@ const registry: Record<SelectorKey, SelectorDefinition> = {
|
||||
searchParams: {
|
||||
credentialId,
|
||||
spreadsheetId: context.spreadsheetId,
|
||||
driveId: context.driveId,
|
||||
workflowId: context.workflowId,
|
||||
},
|
||||
}
|
||||
@@ -1527,6 +1529,54 @@ const registry: Record<SelectorKey, SelectorDefinition> = {
|
||||
}))
|
||||
},
|
||||
},
|
||||
'microsoft.excel.drives': {
|
||||
key: 'microsoft.excel.drives',
|
||||
staleTime: SELECTOR_STALE,
|
||||
getQueryKey: ({ context }: SelectorQueryArgs) => [
|
||||
'selectors',
|
||||
'microsoft.excel.drives',
|
||||
context.oauthCredential ?? 'none',
|
||||
context.siteId ?? 'none',
|
||||
],
|
||||
enabled: ({ context }) => Boolean(context.oauthCredential && context.siteId),
|
||||
fetchList: async ({ context }: SelectorQueryArgs) => {
|
||||
const credentialId = ensureCredential(context, 'microsoft.excel.drives')
|
||||
if (!context.siteId) {
|
||||
throw new Error('Missing site ID for microsoft.excel.drives selector')
|
||||
}
|
||||
const body = JSON.stringify({
|
||||
credential: credentialId,
|
||||
workflowId: context.workflowId,
|
||||
siteId: context.siteId,
|
||||
})
|
||||
const data = await fetchJson<{ drives: { id: string; name: string }[] }>(
|
||||
'/api/tools/microsoft_excel/drives',
|
||||
{ method: 'POST', body }
|
||||
)
|
||||
return (data.drives || []).map((drive) => ({
|
||||
id: drive.id,
|
||||
label: drive.name,
|
||||
}))
|
||||
},
|
||||
fetchById: async ({ context, detailId }: SelectorQueryArgs) => {
|
||||
if (!detailId || !context.siteId) return null
|
||||
const credentialId = ensureCredential(context, 'microsoft.excel.drives')
|
||||
const data = await fetchJson<{ drive: { id: string; name: string } }>(
|
||||
'/api/tools/microsoft_excel/drives',
|
||||
{
|
||||
method: 'POST',
|
||||
body: JSON.stringify({
|
||||
credential: credentialId,
|
||||
workflowId: context.workflowId,
|
||||
siteId: context.siteId,
|
||||
driveId: detailId,
|
||||
}),
|
||||
}
|
||||
)
|
||||
if (!data.drive) return null
|
||||
return { id: data.drive.id, label: data.drive.name }
|
||||
},
|
||||
},
|
||||
'microsoft.excel': {
|
||||
key: 'microsoft.excel',
|
||||
staleTime: SELECTOR_STALE,
|
||||
@@ -1534,6 +1584,7 @@ const registry: Record<SelectorKey, SelectorDefinition> = {
|
||||
'selectors',
|
||||
'microsoft.excel',
|
||||
context.oauthCredential ?? 'none',
|
||||
context.driveId ?? 'none',
|
||||
search ?? '',
|
||||
],
|
||||
enabled: ({ context }) => Boolean(context.oauthCredential),
|
||||
@@ -1545,6 +1596,7 @@ const registry: Record<SelectorKey, SelectorDefinition> = {
|
||||
searchParams: {
|
||||
credentialId,
|
||||
query: search,
|
||||
driveId: context.driveId,
|
||||
workflowId: context.workflowId,
|
||||
},
|
||||
}
|
||||
|
||||
@@ -40,6 +40,7 @@ export type SelectorKey =
|
||||
| 'onedrive.folders'
|
||||
| 'sharepoint.sites'
|
||||
| 'microsoft.excel'
|
||||
| 'microsoft.excel.drives'
|
||||
| 'microsoft.excel.sheets'
|
||||
| 'microsoft.word'
|
||||
| 'microsoft.planner'
|
||||
@@ -75,6 +76,7 @@ export interface SelectorContext {
|
||||
siteId?: string
|
||||
collectionId?: string
|
||||
spreadsheetId?: string
|
||||
driveId?: string
|
||||
excludeWorkflowId?: string
|
||||
baseId?: string
|
||||
datasetId?: string
|
||||
|
||||
@@ -1,100 +0,0 @@
|
||||
'use client'
|
||||
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
|
||||
const TICK_MS = 16
|
||||
const MIN_CHARS_PER_TICK = 3
|
||||
const CHASE_FACTOR = 0.3
|
||||
const RESUME_IDLE_MS = 140
|
||||
const RESUME_RAMP_MS = 180
|
||||
|
||||
function easeOutCubic(t: number): number {
|
||||
const clamped = Math.max(0, Math.min(1, t))
|
||||
return 1 - (1 - clamped) ** 3
|
||||
}
|
||||
|
||||
/**
|
||||
* Progressively reveals streaming text character-by-character at a steady
|
||||
* rate regardless of how the data arrives.
|
||||
*
|
||||
* Small deltas (individual tokens) reveal at the base rate of 3 chars per
|
||||
* 16 ms. Large gaps (burst arrivals) catch up exponentially via
|
||||
* CHASE_FACTOR so the reveal never falls far behind.
|
||||
*
|
||||
* When `isStreaming` is false the target is returned directly.
|
||||
*/
|
||||
export function useStreamingText(target: string, isStreaming: boolean): string {
|
||||
const [displayed, setDisplayed] = useState(target)
|
||||
const revealedRef = useRef(target)
|
||||
const targetRef = useRef(target)
|
||||
const lastTargetLengthRef = useRef(target.length)
|
||||
const lastTargetChangeAtRef = useRef(Date.now())
|
||||
const resumeStartedAtRef = useRef<number | null>(null)
|
||||
|
||||
targetRef.current = target
|
||||
|
||||
useEffect(() => {
|
||||
const now = Date.now()
|
||||
const previousLength = lastTargetLengthRef.current
|
||||
const nextLength = target.length
|
||||
|
||||
if (nextLength > previousLength) {
|
||||
const idleFor = now - lastTargetChangeAtRef.current
|
||||
if (isStreaming && idleFor >= RESUME_IDLE_MS) {
|
||||
resumeStartedAtRef.current = now
|
||||
}
|
||||
lastTargetChangeAtRef.current = now
|
||||
} else if (nextLength < previousLength) {
|
||||
lastTargetChangeAtRef.current = now
|
||||
resumeStartedAtRef.current = null
|
||||
}
|
||||
|
||||
lastTargetLengthRef.current = nextLength
|
||||
}, [target, isStreaming])
|
||||
|
||||
useEffect(() => {
|
||||
if (isStreaming) return
|
||||
if (revealedRef.current === target) return
|
||||
revealedRef.current = target
|
||||
lastTargetChangeAtRef.current = Date.now()
|
||||
lastTargetLengthRef.current = target.length
|
||||
resumeStartedAtRef.current = null
|
||||
setDisplayed(target)
|
||||
}, [target, isStreaming])
|
||||
|
||||
useEffect(() => {
|
||||
if (!isStreaming) return
|
||||
|
||||
if (targetRef.current.length < revealedRef.current.length) {
|
||||
revealedRef.current = ''
|
||||
}
|
||||
|
||||
const timer = setInterval(() => {
|
||||
const now = Date.now()
|
||||
const current = revealedRef.current
|
||||
const tgt = targetRef.current
|
||||
if (current.length >= tgt.length) return
|
||||
|
||||
const gap = tgt.length - current.length
|
||||
const normalChars = Math.max(MIN_CHARS_PER_TICK, Math.ceil(gap * CHASE_FACTOR))
|
||||
|
||||
let chars = normalChars
|
||||
const resumeStartedAt = resumeStartedAtRef.current
|
||||
if (resumeStartedAt !== null) {
|
||||
const progress = easeOutCubic((now - resumeStartedAt) / RESUME_RAMP_MS)
|
||||
chars = Math.max(MIN_CHARS_PER_TICK, Math.ceil(normalChars * progress))
|
||||
if (progress >= 1) {
|
||||
resumeStartedAtRef.current = null
|
||||
}
|
||||
}
|
||||
|
||||
chars = Math.min(gap, chars)
|
||||
revealedRef.current = tgt.slice(0, current.length + chars)
|
||||
setDisplayed(revealedRef.current)
|
||||
}, TICK_MS)
|
||||
|
||||
return () => clearInterval(timer)
|
||||
}, [isStreaming])
|
||||
|
||||
return displayed
|
||||
}
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import type { BlogMeta } from '@/lib/blog/schema'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export function buildPostMetadata(post: BlogMeta): Metadata {
|
||||
const base = new URL(post.canonical)
|
||||
@@ -85,10 +86,10 @@ export function buildArticleJsonLd(post: BlogMeta) {
|
||||
publisher: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
url: 'https://sim.ai/logo/primary/medium.png',
|
||||
url: `${SITE_URL}/logo/primary/medium.png`,
|
||||
},
|
||||
},
|
||||
mainEntityOfPage: {
|
||||
@@ -112,8 +113,8 @@ export function buildBreadcrumbJsonLd(post: BlogMeta) {
|
||||
return {
|
||||
'@type': 'BreadcrumbList',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` },
|
||||
{ '@type': 'ListItem', position: 3, name: post.title, item: post.canonical },
|
||||
],
|
||||
}
|
||||
@@ -150,22 +151,22 @@ export function buildCollectionPageJsonLd() {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'CollectionPage',
|
||||
name: 'Sim Blog',
|
||||
url: 'https://sim.ai/blog',
|
||||
url: `${SITE_URL}/blog`,
|
||||
description: 'Announcements, insights, and guides for building AI agents.',
|
||||
publisher: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
url: 'https://sim.ai/logo/primary/medium.png',
|
||||
url: `${SITE_URL}/logo/primary/medium.png`,
|
||||
},
|
||||
},
|
||||
inLanguage: 'en-US',
|
||||
isPartOf: {
|
||||
'@type': 'WebSite',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
@@ -17,6 +17,7 @@ export interface ToolCatalogEntry {
|
||||
| 'create_job'
|
||||
| 'create_workflow'
|
||||
| 'create_workspace_mcp_server'
|
||||
| 'debug'
|
||||
| 'delete_file'
|
||||
| 'delete_folder'
|
||||
| 'delete_workflow'
|
||||
@@ -70,6 +71,7 @@ export interface ToolCatalogEntry {
|
||||
| 'respond'
|
||||
| 'restore_resource'
|
||||
| 'revert_to_version'
|
||||
| 'run'
|
||||
| 'run_block'
|
||||
| 'run_from_block'
|
||||
| 'run_workflow'
|
||||
@@ -105,6 +107,7 @@ export interface ToolCatalogEntry {
|
||||
| 'create_job'
|
||||
| 'create_workflow'
|
||||
| 'create_workspace_mcp_server'
|
||||
| 'debug'
|
||||
| 'delete_file'
|
||||
| 'delete_folder'
|
||||
| 'delete_workflow'
|
||||
@@ -158,6 +161,7 @@ export interface ToolCatalogEntry {
|
||||
| 'respond'
|
||||
| 'restore_resource'
|
||||
| 'revert_to_version'
|
||||
| 'run'
|
||||
| 'run_block'
|
||||
| 'run_from_block'
|
||||
| 'run_workflow'
|
||||
@@ -187,11 +191,13 @@ export interface ToolCatalogEntry {
|
||||
subagentId?:
|
||||
| 'agent'
|
||||
| 'auth'
|
||||
| 'debug'
|
||||
| 'deploy'
|
||||
| 'file'
|
||||
| 'job'
|
||||
| 'knowledge'
|
||||
| 'research'
|
||||
| 'run'
|
||||
| 'superagent'
|
||||
| 'table'
|
||||
| 'workflow'
|
||||
@@ -444,6 +450,31 @@ export const CreateWorkspaceMcpServer: ToolCatalogEntry = {
|
||||
requiredPermission: 'admin',
|
||||
}
|
||||
|
||||
export const Debug: ToolCatalogEntry = {
|
||||
id: 'debug',
|
||||
name: 'debug',
|
||||
route: 'subagent',
|
||||
mode: 'async',
|
||||
parameters: {
|
||||
properties: {
|
||||
context: {
|
||||
description:
|
||||
'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.',
|
||||
type: 'string',
|
||||
},
|
||||
request: {
|
||||
description:
|
||||
'What to debug. Include error messages, block IDs, and any context about the failure.',
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['request'],
|
||||
type: 'object',
|
||||
},
|
||||
subagentId: 'debug',
|
||||
internal: true,
|
||||
}
|
||||
|
||||
export const DeleteFile: ToolCatalogEntry = {
|
||||
id: 'delete_file',
|
||||
name: 'delete_file',
|
||||
@@ -2039,7 +2070,8 @@ export const Read: ToolCatalogEntry = {
|
||||
},
|
||||
path: {
|
||||
type: 'string',
|
||||
description: "Path to the file to read (e.g. 'workflows/My Workflow/state.json').",
|
||||
description:
|
||||
"Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').",
|
||||
},
|
||||
},
|
||||
required: ['path'],
|
||||
@@ -2231,6 +2263,26 @@ export const RevertToVersion: ToolCatalogEntry = {
|
||||
requiredPermission: 'admin',
|
||||
}
|
||||
|
||||
export const Run: ToolCatalogEntry = {
|
||||
id: 'run',
|
||||
name: 'run',
|
||||
route: 'subagent',
|
||||
mode: 'async',
|
||||
parameters: {
|
||||
properties: {
|
||||
context: {
|
||||
description: 'Pre-gathered context: workflow state, block IDs, input requirements.',
|
||||
type: 'string',
|
||||
},
|
||||
request: { description: 'What to run or what logs to check.', type: 'string' },
|
||||
},
|
||||
required: ['request'],
|
||||
type: 'object',
|
||||
},
|
||||
subagentId: 'run',
|
||||
internal: true,
|
||||
}
|
||||
|
||||
export const RunBlock: ToolCatalogEntry = {
|
||||
id: 'run_block',
|
||||
name: 'run_block',
|
||||
@@ -3264,6 +3316,7 @@ export const TOOL_CATALOG: Record<string, ToolCatalogEntry> = {
|
||||
[CreateJob.id]: CreateJob,
|
||||
[CreateWorkflow.id]: CreateWorkflow,
|
||||
[CreateWorkspaceMcpServer.id]: CreateWorkspaceMcpServer,
|
||||
[Debug.id]: Debug,
|
||||
[DeleteFile.id]: DeleteFile,
|
||||
[DeleteFolder.id]: DeleteFolder,
|
||||
[DeleteWorkflow.id]: DeleteWorkflow,
|
||||
@@ -3317,6 +3370,7 @@ export const TOOL_CATALOG: Record<string, ToolCatalogEntry> = {
|
||||
[Respond.id]: Respond,
|
||||
[RestoreResource.id]: RestoreResource,
|
||||
[RevertToVersion.id]: RevertToVersion,
|
||||
[Run.id]: Run,
|
||||
[RunBlock.id]: RunBlock,
|
||||
[RunFromBlock.id]: RunFromBlock,
|
||||
[RunWorkflow.id]: RunWorkflow,
|
||||
|
||||
@@ -266,6 +266,25 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
|
||||
},
|
||||
resultSchema: undefined,
|
||||
},
|
||||
debug: {
|
||||
parameters: {
|
||||
properties: {
|
||||
context: {
|
||||
description:
|
||||
'Pre-gathered context: workflow state JSON, block schemas, error logs. The debug agent will skip re-reading anything included here.',
|
||||
type: 'string',
|
||||
},
|
||||
request: {
|
||||
description:
|
||||
'What to debug. Include error messages, block IDs, and any context about the failure.',
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['request'],
|
||||
type: 'object',
|
||||
},
|
||||
resultSchema: undefined,
|
||||
},
|
||||
delete_file: {
|
||||
parameters: {
|
||||
type: 'object',
|
||||
@@ -1872,7 +1891,8 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
|
||||
},
|
||||
path: {
|
||||
type: 'string',
|
||||
description: "Path to the file to read (e.g. 'workflows/My Workflow/state.json').",
|
||||
description:
|
||||
"Path to the file to read (e.g. 'workflows/My Workflow/state.json' or 'workflows/Projects/Q1/My Workflow/state.json').",
|
||||
},
|
||||
},
|
||||
required: ['path'],
|
||||
@@ -2070,6 +2090,23 @@ export const TOOL_RUNTIME_SCHEMAS: Record<string, ToolRuntimeSchemaEntry> = {
|
||||
},
|
||||
resultSchema: undefined,
|
||||
},
|
||||
run: {
|
||||
parameters: {
|
||||
properties: {
|
||||
context: {
|
||||
description: 'Pre-gathered context: workflow state, block IDs, input requirements.',
|
||||
type: 'string',
|
||||
},
|
||||
request: {
|
||||
description: 'What to run or what logs to check.',
|
||||
type: 'string',
|
||||
},
|
||||
},
|
||||
required: ['request'],
|
||||
type: 'object',
|
||||
},
|
||||
resultSchema: undefined,
|
||||
},
|
||||
run_block: {
|
||||
parameters: {
|
||||
type: 'object',
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
|
||||
import { SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import {
|
||||
MothershipStreamV1EventType,
|
||||
@@ -16,8 +17,10 @@ import type {
|
||||
} from '@/lib/copilot/request/types'
|
||||
import { prepareExecutionContext } from '@/lib/copilot/tools/handlers/context'
|
||||
import { env } from '@/lib/core/config/env'
|
||||
import { isHosted } from '@/lib/core/config/feature-flags'
|
||||
import { generateId } from '@/lib/core/utils/uuid'
|
||||
import { getEffectiveDecryptedEnv } from '@/lib/environment/utils'
|
||||
import { getWorkflowById } from '@/lib/workflows/utils'
|
||||
|
||||
const logger = createLogger('CopilotSubagentOrchestrator')
|
||||
|
||||
@@ -49,10 +52,40 @@ export async function orchestrateSubagentStream(
|
||||
options: SubagentOrchestratorOptions
|
||||
): Promise<SubagentOrchestratorResult> {
|
||||
const { userId, workflowId, workspaceId, userPermission } = options
|
||||
const execContext = await buildExecutionContext(userId, workflowId, workspaceId)
|
||||
const chatId =
|
||||
(typeof requestPayload.chatId === 'string' && requestPayload.chatId) || generateId()
|
||||
const execContext = await buildExecutionContext(userId, workflowId, workspaceId, chatId)
|
||||
let resolvedWorkflowName =
|
||||
typeof requestPayload.workflowName === 'string' ? requestPayload.workflowName : undefined
|
||||
let resolvedWorkspaceId =
|
||||
execContext.workspaceId ||
|
||||
(typeof requestPayload.workspaceId === 'string' ? requestPayload.workspaceId : workspaceId)
|
||||
|
||||
if (workflowId && (!resolvedWorkflowName || !resolvedWorkspaceId)) {
|
||||
const workflow = await getWorkflowById(workflowId)
|
||||
resolvedWorkflowName ||= workflow?.name || undefined
|
||||
resolvedWorkspaceId ||= workflow?.workspaceId || undefined
|
||||
}
|
||||
|
||||
let resolvedWorkspaceContext =
|
||||
typeof requestPayload.workspaceContext === 'string'
|
||||
? requestPayload.workspaceContext
|
||||
: undefined
|
||||
if (!resolvedWorkspaceContext && resolvedWorkspaceId) {
|
||||
try {
|
||||
resolvedWorkspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to generate workspace context for subagent request', {
|
||||
agentId,
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const msgId = requestPayload?.messageId
|
||||
const context = createStreamingContext({
|
||||
chatId,
|
||||
messageId: typeof msgId === 'string' ? msgId : generateId(),
|
||||
})
|
||||
|
||||
@@ -69,8 +102,13 @@ export async function orchestrateSubagentStream(
|
||||
},
|
||||
body: JSON.stringify({
|
||||
...requestPayload,
|
||||
chatId,
|
||||
userId,
|
||||
stream: true,
|
||||
...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}),
|
||||
...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}),
|
||||
...(resolvedWorkspaceContext ? { workspaceContext: resolvedWorkspaceContext } : {}),
|
||||
isHosted,
|
||||
...(userPermission ? { userPermission } : {}),
|
||||
}),
|
||||
},
|
||||
@@ -135,16 +173,18 @@ function normalizeStructuredResult(data: unknown): SubagentOrchestratorResult['s
|
||||
async function buildExecutionContext(
|
||||
userId: string,
|
||||
workflowId?: string,
|
||||
workspaceId?: string
|
||||
workspaceId?: string,
|
||||
chatId?: string
|
||||
): Promise<ExecutionContext> {
|
||||
if (workflowId) {
|
||||
return prepareExecutionContext(userId, workflowId)
|
||||
return prepareExecutionContext(userId, workflowId, chatId, { workspaceId })
|
||||
}
|
||||
const decryptedEnvVars = await getEffectiveDecryptedEnv(userId, workspaceId)
|
||||
return {
|
||||
userId,
|
||||
workflowId: workflowId || '',
|
||||
workspaceId,
|
||||
chatId,
|
||||
decryptedEnvVars,
|
||||
}
|
||||
}
|
||||
|
||||
@@ -284,16 +284,18 @@ CAN DO:
|
||||
- Configure block settings and connections
|
||||
- Set environment variables and workflow variables
|
||||
- Move, rename, delete workflows and folders
|
||||
- Run or inspect workflows through the nested run/debug specialists when validation is needed
|
||||
- Delegate deployment or auth setup to the nested specialists when needed
|
||||
|
||||
CANNOT DO:
|
||||
- Run or test workflows (use sim_test separately)
|
||||
- Deploy workflows (use sim_deploy separately)
|
||||
- Replace dedicated testing flows like sim_test when you want a standalone execution-only pass
|
||||
- Replace dedicated deploy flows like sim_deploy when you want deployment as a separate step
|
||||
|
||||
WORKFLOW:
|
||||
1. Call create_workflow to get a workflowId (for new workflows)
|
||||
2. Call sim_workflow with the request and workflowId
|
||||
3. Workflow agent gathers info and builds in one pass
|
||||
4. Call sim_test to verify it works
|
||||
3. Workflow agent gathers info, builds, and can delegate run/debug/auth/deploy help in one pass
|
||||
4. Call sim_test when you want a dedicated execution-only verification pass
|
||||
5. Optionally call sim_deploy to make it externally accessible`,
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
@@ -375,7 +377,7 @@ ALSO CAN:
|
||||
},
|
||||
{
|
||||
name: 'sim_test',
|
||||
agentId: 'test',
|
||||
agentId: 'run',
|
||||
description: `Run a workflow and verify its outputs. Works on both deployed and undeployed (draft) workflows. Use after building to verify correctness.
|
||||
|
||||
Supports full and partial execution:
|
||||
@@ -476,7 +478,7 @@ Supports full and partial execution:
|
||||
name: 'sim_info',
|
||||
agentId: 'info',
|
||||
description:
|
||||
"Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Always provide workflowId to scope results to a specific workflow.",
|
||||
"Inspect a workflow's blocks, connections, outputs, variables, and metadata. Use for questions about the Sim platform itself — how blocks work, what integrations are available, platform concepts, etc. Provide workflowId when you want results scoped to a specific workflow.",
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
@@ -488,22 +490,6 @@ Supports full and partial execution:
|
||||
},
|
||||
annotations: { readOnlyHint: true },
|
||||
},
|
||||
{
|
||||
name: 'sim_workflow',
|
||||
agentId: 'workflow',
|
||||
description:
|
||||
'Manage workflow-level configuration: environment variables, settings, scheduling, and deployment status. Use for any data about a specific workflow — its settings, credentials, variables, or deployment state.',
|
||||
inputSchema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
request: { type: 'string' },
|
||||
workflowId: { type: 'string' },
|
||||
context: { type: 'object' },
|
||||
},
|
||||
required: ['request'],
|
||||
},
|
||||
annotations: { destructiveHint: false },
|
||||
},
|
||||
{
|
||||
name: 'sim_research',
|
||||
agentId: 'research',
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
import { getEnv } from '@/lib/core/config/env'
|
||||
import { isProd } from '@/lib/core/config/feature-flags'
|
||||
|
||||
/** Canonical base URL for the public-facing marketing site. No trailing slash. */
|
||||
export const SITE_URL = 'https://www.sim.ai'
|
||||
|
||||
function hasHttpProtocol(url: string): boolean {
|
||||
return /^https?:\/\//i.test(url)
|
||||
}
|
||||
|
||||
@@ -555,7 +555,7 @@ export class LoggingSession {
|
||||
models: {},
|
||||
}
|
||||
|
||||
const message = error?.message || 'Execution failed before starting blocks'
|
||||
const message = error?.message || 'Run failed before starting blocks'
|
||||
|
||||
const errorSpan: TraceSpan = {
|
||||
id: 'workflow-error-root',
|
||||
@@ -994,7 +994,7 @@ export class LoggingSession {
|
||||
traceSpans: params?.traceSpans,
|
||||
endedAt: params?.endedAt,
|
||||
totalDurationMs: params?.totalDurationMs,
|
||||
errorMessage: 'Execution was cancelled',
|
||||
errorMessage: 'Run was cancelled',
|
||||
isError: false,
|
||||
finalizationPath: 'cancelled',
|
||||
finalOutput: { cancelled: true },
|
||||
@@ -1021,7 +1021,7 @@ export class LoggingSession {
|
||||
traceSpans: params?.traceSpans,
|
||||
endedAt: params?.endedAt,
|
||||
totalDurationMs: params?.totalDurationMs,
|
||||
errorMessage: 'Execution paused but failed to store full trace spans',
|
||||
errorMessage: 'Run paused but failed to store full trace spans',
|
||||
isError: false,
|
||||
finalizationPath: 'paused',
|
||||
finalOutput: { paused: true },
|
||||
@@ -1041,7 +1041,7 @@ export class LoggingSession {
|
||||
requestId?: string
|
||||
): Promise<void> {
|
||||
try {
|
||||
const message = errorMessage || 'Execution failed'
|
||||
const message = errorMessage || 'Run failed'
|
||||
await db
|
||||
.update(workflowExecutionLogs)
|
||||
.set({
|
||||
|
||||
@@ -57,25 +57,25 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
|
||||
{
|
||||
key: 'cost',
|
||||
label: 'Cost',
|
||||
description: 'Filter by execution cost',
|
||||
description: 'Filter by run cost',
|
||||
options: [
|
||||
{
|
||||
value: '>0.01',
|
||||
label: 'Over 2 credits',
|
||||
description: 'Executions costing more than 2 credits',
|
||||
description: 'Runs costing more than 2 credits',
|
||||
},
|
||||
{
|
||||
value: '<0.005',
|
||||
label: 'Under 1 credit',
|
||||
description: 'Executions costing less than 1 credit',
|
||||
description: 'Runs costing less than 1 credit',
|
||||
},
|
||||
{
|
||||
value: '>0.05',
|
||||
label: 'Over 10 credits',
|
||||
description: 'Executions costing more than 10 credits',
|
||||
description: 'Runs costing more than 10 credits',
|
||||
},
|
||||
{ value: '=0', label: 'Free', description: 'Free executions' },
|
||||
{ value: '>0', label: 'Paid', description: 'Executions with cost' },
|
||||
{ value: '=0', label: 'Free', description: 'Free runs' },
|
||||
{ value: '>0', label: 'Paid', description: 'Runs with cost' },
|
||||
],
|
||||
},
|
||||
{
|
||||
@@ -104,13 +104,13 @@ export const FILTER_DEFINITIONS: FilterDefinition[] = [
|
||||
{
|
||||
key: 'duration',
|
||||
label: 'Duration',
|
||||
description: 'Filter by execution duration',
|
||||
description: 'Filter by run duration',
|
||||
options: [
|
||||
{ value: '>5s', label: 'Over 5s', description: 'Executions longer than 5 seconds' },
|
||||
{ value: '<1s', label: 'Under 1s', description: 'Executions shorter than 1 second' },
|
||||
{ value: '>10s', label: 'Over 10s', description: 'Executions longer than 10 seconds' },
|
||||
{ value: '>30s', label: 'Over 30s', description: 'Executions longer than 30 seconds' },
|
||||
{ value: '<500ms', label: 'Under 0.5s', description: 'Very fast executions' },
|
||||
{ value: '>5s', label: 'Over 5s', description: 'Runs longer than 5 seconds' },
|
||||
{ value: '<1s', label: 'Under 1s', description: 'Runs shorter than 1 second' },
|
||||
{ value: '>10s', label: 'Over 10s', description: 'Runs longer than 10 seconds' },
|
||||
{ value: '>30s', label: 'Over 30s', description: 'Runs longer than 30 seconds' },
|
||||
{ value: '<500ms', label: 'Under 0.5s', description: 'Very fast runs' },
|
||||
],
|
||||
},
|
||||
]
|
||||
@@ -225,8 +225,8 @@ export class SearchSuggestions {
|
||||
suggestions.push({
|
||||
id: 'filter-key-executionId',
|
||||
value: 'executionId:',
|
||||
label: 'Execution ID',
|
||||
description: 'Filter by execution ID',
|
||||
label: 'Run ID',
|
||||
description: 'Filter by run ID',
|
||||
category: 'filters',
|
||||
})
|
||||
|
||||
@@ -283,7 +283,7 @@ export class SearchSuggestions {
|
||||
id: `filter-value-trigger-${t.value}`,
|
||||
value: `trigger:${t.value}`,
|
||||
label: t.label,
|
||||
description: `${t.label}-triggered executions`,
|
||||
description: `${t.label}-triggered runs`,
|
||||
category: 'trigger' as const,
|
||||
color: t.color,
|
||||
}))
|
||||
@@ -604,7 +604,7 @@ export class SearchSuggestions {
|
||||
id: `trigger-match-${trigger.value}`,
|
||||
value: `trigger:${trigger.value}`,
|
||||
label: trigger.label,
|
||||
description: `${trigger.label}-triggered executions`,
|
||||
description: `${trigger.label}-triggered runs`,
|
||||
category: 'trigger' as const,
|
||||
color: trigger.color,
|
||||
}))
|
||||
|
||||
@@ -17,6 +17,7 @@ export const SELECTOR_CONTEXT_FIELDS = new Set<keyof SelectorContext>([
|
||||
'siteId',
|
||||
'collectionId',
|
||||
'spreadsheetId',
|
||||
'driveId',
|
||||
'fileId',
|
||||
'baseId',
|
||||
'datasetId',
|
||||
|
||||
@@ -149,6 +149,15 @@ const nextConfig: NextConfig = {
|
||||
],
|
||||
async headers() {
|
||||
return [
|
||||
{
|
||||
source: '/:all*(svg|jpg|jpeg|png|gif|ico|webp|avif|woff|woff2|ttf|eot)',
|
||||
headers: [
|
||||
{
|
||||
key: 'Cache-Control',
|
||||
value: 'public, max-age=86400, stale-while-revalidate=604800',
|
||||
},
|
||||
],
|
||||
},
|
||||
{
|
||||
source: '/.well-known/:path*',
|
||||
headers: [
|
||||
@@ -386,12 +395,12 @@ const nextConfig: NextConfig = {
|
||||
redirects.push(
|
||||
{
|
||||
source: '/building/:path*',
|
||||
destination: 'https://sim.ai/blog/:path*',
|
||||
destination: 'https://www.sim.ai/blog/:path*',
|
||||
permanent: true,
|
||||
},
|
||||
{
|
||||
source: '/studio/:path*',
|
||||
destination: 'https://sim.ai/blog/:path*',
|
||||
destination: 'https://www.sim.ai/blog/:path*',
|
||||
permanent: true,
|
||||
}
|
||||
)
|
||||
|
||||
|
Before Width: | Height: | Size: 180 KiB After Width: | Height: | Size: 81 KiB |
BIN
apps/sim/public/blog/mothership/cover.jpg
Normal file
|
After Width: | Height: | Size: 100 KiB |
|
Before Width: | Height: | Size: 1.1 MiB |
|
Before Width: | Height: | Size: 106 KiB After Width: | Height: | Size: 32 KiB |
|
Before Width: | Height: | Size: 120 KiB After Width: | Height: | Size: 27 KiB |
|
Before Width: | Height: | Size: 275 KiB After Width: | Height: | Size: 67 KiB |
|
Before Width: | Height: | Size: 765 KiB After Width: | Height: | Size: 19 KiB |
@@ -6,6 +6,7 @@ import type {
|
||||
MicrosoftExcelV2ToolParams,
|
||||
} from '@/tools/microsoft_excel/types'
|
||||
import {
|
||||
getItemBasePath,
|
||||
getSpreadsheetWebUrl,
|
||||
trimTrailingEmptyRowsAndColumns,
|
||||
} from '@/tools/microsoft_excel/utils'
|
||||
@@ -35,6 +36,13 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
visibility: 'user-or-llm',
|
||||
description: 'The ID of the spreadsheet/workbook to read from (e.g., "01ABC123DEF456")',
|
||||
},
|
||||
driveId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive.',
|
||||
},
|
||||
range: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
@@ -51,18 +59,17 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
throw new Error('Spreadsheet ID is required')
|
||||
}
|
||||
|
||||
const basePath = getItemBasePath(spreadsheetId, params.driveId)
|
||||
|
||||
if (!params.range) {
|
||||
// When no range is provided, first fetch the first worksheet name (to avoid hardcoding "Sheet1")
|
||||
// We'll read its default range after in transformResponse
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets?$select=name&$orderby=position&$top=1`
|
||||
return `${basePath}/workbook/worksheets?$select=name&$orderby=position&$top=1`
|
||||
}
|
||||
|
||||
const rangeInput = params.range.trim()
|
||||
|
||||
// If the input contains no '!', treat it as a sheet name only and fetch usedRange
|
||||
if (!rangeInput.includes('!')) {
|
||||
const sheetOnly = encodeURIComponent(rangeInput)
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets('${sheetOnly}')/usedRange(valuesOnly=true)`
|
||||
return `${basePath}/workbook/worksheets('${sheetOnly}')/usedRange(valuesOnly=true)`
|
||||
}
|
||||
|
||||
const match = rangeInput.match(/^([^!]+)!(.+)$/)
|
||||
@@ -76,7 +83,7 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
const sheetName = encodeURIComponent(match[1])
|
||||
const address = encodeURIComponent(match[2])
|
||||
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets('${sheetName}')/range(address='${address}')`
|
||||
return `${basePath}/workbook/worksheets('${sheetName}')/range(address='${address}')`
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => {
|
||||
@@ -91,6 +98,9 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response, params?: MicrosoftExcelToolParams) => {
|
||||
const spreadsheetId = params?.spreadsheetId?.trim() || ''
|
||||
const driveId = params?.driveId
|
||||
|
||||
// If we came from the worksheets listing (no range provided), resolve first sheet name then fetch range
|
||||
if (response.url.includes('/workbook/worksheets?')) {
|
||||
const listData = await response.json()
|
||||
@@ -100,23 +110,19 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
throw new Error('No worksheets found in the Excel workbook')
|
||||
}
|
||||
|
||||
const spreadsheetIdFromUrl = response.url.split('/drive/items/')[1]?.split('/')[0] || ''
|
||||
const accessToken = params?.accessToken
|
||||
if (!accessToken) {
|
||||
throw new Error('Access token is required to read Excel range')
|
||||
}
|
||||
|
||||
// Use usedRange(valuesOnly=true) to fetch only populated cells, avoiding thousands of empty rows
|
||||
const rangeUrl = `https://graph.microsoft.com/v1.0/me/drive/items/${encodeURIComponent(
|
||||
spreadsheetIdFromUrl
|
||||
)}/workbook/worksheets('${encodeURIComponent(firstSheetName)}')/usedRange(valuesOnly=true)`
|
||||
const basePath = getItemBasePath(spreadsheetId, driveId)
|
||||
const rangeUrl = `${basePath}/workbook/worksheets('${encodeURIComponent(firstSheetName)}')/usedRange(valuesOnly=true)`
|
||||
|
||||
const rangeResp = await fetch(rangeUrl, {
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
})
|
||||
|
||||
if (!rangeResp.ok) {
|
||||
// Normalize Microsoft Graph sheet/range errors to a friendly message
|
||||
throw new Error(
|
||||
'Invalid range provided or worksheet not found. Provide a range like "Sheet1!A1:B2" or just the sheet name to read the whole sheet'
|
||||
)
|
||||
@@ -124,20 +130,12 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
|
||||
const data = await rangeResp.json()
|
||||
|
||||
// usedRange returns an address (A1 notation) and values matrix
|
||||
const address: string = data.address || data.addressLocal || `${firstSheetName}!A1`
|
||||
const rawValues: ExcelCellValue[][] = data.values || []
|
||||
|
||||
const values = trimTrailingEmptyRowsAndColumns(rawValues)
|
||||
|
||||
// Fetch the browser-accessible web URL
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetIdFromUrl, accessToken)
|
||||
|
||||
const metadata = {
|
||||
spreadsheetId: spreadsheetIdFromUrl,
|
||||
properties: {},
|
||||
spreadsheetUrl: webUrl,
|
||||
}
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId)
|
||||
|
||||
const result: MicrosoftExcelReadResponse = {
|
||||
success: true,
|
||||
@@ -147,8 +145,8 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
values,
|
||||
},
|
||||
metadata: {
|
||||
spreadsheetId: metadata.spreadsheetId,
|
||||
spreadsheetUrl: metadata.spreadsheetUrl,
|
||||
spreadsheetId,
|
||||
spreadsheetUrl: webUrl,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -159,21 +157,11 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
// Normal path: caller supplied a range; just return the parsed result
|
||||
const data = await response.json()
|
||||
|
||||
const urlParts = response.url.split('/drive/items/')
|
||||
const spreadsheetId = urlParts[1]?.split('/')[0] || ''
|
||||
|
||||
// Fetch the browser-accessible web URL
|
||||
const accessToken = params?.accessToken
|
||||
if (!accessToken) {
|
||||
throw new Error('Access token is required')
|
||||
}
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken)
|
||||
|
||||
const metadata = {
|
||||
spreadsheetId,
|
||||
properties: {},
|
||||
spreadsheetUrl: webUrl,
|
||||
}
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId)
|
||||
|
||||
const address: string = data.address || data.addressLocal || data.range || ''
|
||||
const rawValues: ExcelCellValue[][] = data.values || []
|
||||
@@ -187,8 +175,8 @@ export const readTool: ToolConfig<MicrosoftExcelToolParams, MicrosoftExcelReadRe
|
||||
values,
|
||||
},
|
||||
metadata: {
|
||||
spreadsheetId: metadata.spreadsheetId,
|
||||
spreadsheetUrl: metadata.spreadsheetUrl,
|
||||
spreadsheetId,
|
||||
spreadsheetUrl: webUrl,
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -240,6 +228,13 @@ export const readV2Tool: ToolConfig<MicrosoftExcelV2ToolParams, MicrosoftExcelV2
|
||||
visibility: 'user-or-llm',
|
||||
description: 'The ID of the spreadsheet/workbook to read from (e.g., "01ABC123DEF456")',
|
||||
},
|
||||
driveId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive.',
|
||||
},
|
||||
sheetName: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
@@ -267,17 +262,17 @@ export const readV2Tool: ToolConfig<MicrosoftExcelV2ToolParams, MicrosoftExcelV2
|
||||
throw new Error('Sheet name is required')
|
||||
}
|
||||
|
||||
const basePath = getItemBasePath(spreadsheetId, params.driveId)
|
||||
const encodedSheetName = encodeURIComponent(sheetName)
|
||||
|
||||
// If no cell range specified, fetch usedRange
|
||||
if (!params.cellRange) {
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets('${encodedSheetName}')/usedRange(valuesOnly=true)`
|
||||
return `${basePath}/workbook/worksheets('${encodedSheetName}')/usedRange(valuesOnly=true)`
|
||||
}
|
||||
|
||||
const cellRange = params.cellRange.trim()
|
||||
const encodedAddress = encodeURIComponent(cellRange)
|
||||
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets('${encodedSheetName}')/range(address='${encodedAddress}')`
|
||||
return `${basePath}/workbook/worksheets('${encodedSheetName}')/range(address='${encodedAddress}')`
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => {
|
||||
@@ -294,20 +289,19 @@ export const readV2Tool: ToolConfig<MicrosoftExcelV2ToolParams, MicrosoftExcelV2
|
||||
transformResponse: async (response: Response, params?: MicrosoftExcelV2ToolParams) => {
|
||||
const data = await response.json()
|
||||
|
||||
const urlParts = response.url.split('/drive/items/')
|
||||
const spreadsheetId = urlParts[1]?.split('/')[0] || ''
|
||||
const spreadsheetId = params?.spreadsheetId?.trim() || ''
|
||||
const driveId = params?.driveId
|
||||
|
||||
const accessToken = params?.accessToken
|
||||
if (!accessToken) {
|
||||
throw new Error('Access token is required')
|
||||
}
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken)
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId)
|
||||
|
||||
const address: string = data.address || data.addressLocal || ''
|
||||
const rawValues: ExcelCellValue[][] = data.values || []
|
||||
const values = trimTrailingEmptyRowsAndColumns(rawValues)
|
||||
|
||||
// Extract sheet name from address (format: SheetName!A1:B2)
|
||||
const sheetName = params?.sheetName || address.split('!')[0] || ''
|
||||
|
||||
return {
|
||||
|
||||
@@ -2,7 +2,7 @@ import type {
|
||||
MicrosoftExcelTableAddResponse,
|
||||
MicrosoftExcelTableToolParams,
|
||||
} from '@/tools/microsoft_excel/types'
|
||||
import { getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils'
|
||||
import { getItemBasePath, getSpreadsheetWebUrl } from '@/tools/microsoft_excel/utils'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const tableAddTool: ToolConfig<
|
||||
@@ -33,6 +33,13 @@ export const tableAddTool: ToolConfig<
|
||||
description:
|
||||
'The ID of the spreadsheet/workbook containing the table (e.g., "01ABC123DEF456")',
|
||||
},
|
||||
driveId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive.',
|
||||
},
|
||||
tableName: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
@@ -51,7 +58,8 @@ export const tableAddTool: ToolConfig<
|
||||
request: {
|
||||
url: (params) => {
|
||||
const tableName = encodeURIComponent(params.tableName)
|
||||
return `https://graph.microsoft.com/v1.0/me/drive/items/${params.spreadsheetId}/workbook/tables('${tableName}')/rows/add`
|
||||
const basePath = getItemBasePath(params.spreadsheetId, params.driveId)
|
||||
return `${basePath}/workbook/tables('${tableName}')/rows/add`
|
||||
},
|
||||
method: 'POST',
|
||||
headers: (params) => ({
|
||||
@@ -106,34 +114,26 @@ export const tableAddTool: ToolConfig<
|
||||
transformResponse: async (response: Response, params?: MicrosoftExcelTableToolParams) => {
|
||||
const data = await response.json()
|
||||
|
||||
const urlParts = response.url.split('/drive/items/')
|
||||
const spreadsheetId = urlParts[1]?.split('/')[0] || ''
|
||||
const spreadsheetId = params?.spreadsheetId?.trim() || ''
|
||||
const driveId = params?.driveId
|
||||
|
||||
// Fetch the browser-accessible web URL
|
||||
const accessToken = params?.accessToken
|
||||
if (!accessToken) {
|
||||
throw new Error('Access token is required')
|
||||
}
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken)
|
||||
const webUrl = await getSpreadsheetWebUrl(spreadsheetId, accessToken, driveId)
|
||||
|
||||
const metadata = {
|
||||
spreadsheetId,
|
||||
spreadsheetUrl: webUrl,
|
||||
}
|
||||
|
||||
const result = {
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
index: data.index || 0,
|
||||
values: data.values || [],
|
||||
metadata: {
|
||||
spreadsheetId: metadata.spreadsheetId,
|
||||
spreadsheetUrl: metadata.spreadsheetUrl,
|
||||
spreadsheetId,
|
||||
spreadsheetUrl: webUrl,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
return result
|
||||
},
|
||||
|
||||
outputs: {
|
||||
|
||||