mirror of
https://github.com/simstudioai/sim.git
synced 2026-04-28 03:00:29 -04:00
Compare commits
127 Commits
fix/log-so
...
v0.6.45
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
010435c53b | ||
|
|
cd8c5bd0b8 | ||
|
|
f0285adc38 | ||
|
|
a39dc158cf | ||
|
|
05c1c5b1f6 | ||
|
|
5274efd8f9 | ||
|
|
0b36c8bcb6 | ||
|
|
842aa2c254 | ||
|
|
46ffc4904e | ||
|
|
ff71a07e8f | ||
|
|
22d4639f13 | ||
|
|
80095788fc | ||
|
|
61b33e5978 | ||
|
|
29fbad2874 | ||
|
|
e281ca0dac | ||
|
|
cbf0a139ed | ||
|
|
751eeaccd4 | ||
|
|
1bf2d95813 | ||
|
|
3a1b1a8032 | ||
|
|
3d6660ba4d | ||
|
|
48e174b21f | ||
|
|
7529a75ac0 | ||
|
|
6b2e83bf58 | ||
|
|
fc07922536 | ||
|
|
367415f649 | ||
|
|
ff2e369c20 | ||
|
|
64cdab24f7 | ||
|
|
3838b6e892 | ||
|
|
a51333aa2f | ||
|
|
0ac05397eb | ||
|
|
8a8bc1b0e6 | ||
|
|
48d5101151 | ||
|
|
9c1b0bc15f | ||
|
|
0e6ada4bdb | ||
|
|
85fda999b5 | ||
|
|
a4da8beb20 | ||
|
|
bd9dcf1ec0 | ||
|
|
6ce299bb23 | ||
|
|
c75d7b9ddc | ||
|
|
c71ae49da0 | ||
|
|
d6dc9f73cd | ||
|
|
6587afb97e | ||
|
|
e23557fdfe | ||
|
|
0abcc6e813 | ||
|
|
d238052fe8 | ||
|
|
c0db9de07b | ||
|
|
7491d70a67 | ||
|
|
4375f9921a | ||
|
|
fb4fb9e869 | ||
|
|
5ab85c6930 | ||
|
|
eba48e815f | ||
|
|
cd7e413607 | ||
|
|
cfe55914c9 | ||
|
|
e3d0e74cc4 | ||
|
|
ffda34442b | ||
|
|
cd3e24b79b | ||
|
|
6d2deb1b33 | ||
|
|
10341ae4a5 | ||
|
|
8b57476957 | ||
|
|
6ef40c5b21 | ||
|
|
4309d0619a | ||
|
|
85f1d96859 | ||
|
|
bc31710c1c | ||
|
|
30c5e82ab0 | ||
|
|
6a4f5f2074 | ||
|
|
74d0a47525 | ||
|
|
c8525852d4 | ||
|
|
20cc0185bf | ||
|
|
cbfab1ceaa | ||
|
|
1acafe8763 | ||
|
|
c1d788ce94 | ||
|
|
bad78ccb59 | ||
|
|
8bbca9ba05 | ||
|
|
34f77e00bc | ||
|
|
fb5ebd3bed | ||
|
|
2e85361ed6 | ||
|
|
59de6bbb43 | ||
|
|
2b9fb19899 | ||
|
|
266bc2141d | ||
|
|
6099683e5a | ||
|
|
4f40c4ce3e | ||
|
|
3efbd1d612 | ||
|
|
04c1f8e475 | ||
|
|
476669fd55 | ||
|
|
4074109362 | ||
|
|
171485d3b6 | ||
|
|
d33acf426d | ||
|
|
bce638dd75 | ||
|
|
05b5588a7b | ||
|
|
32bdf3cfa5 | ||
|
|
12deb0f5b4 | ||
|
|
3c8bb4076c | ||
|
|
c393791f04 | ||
|
|
fc3e762b1f | ||
|
|
70f04c003b | ||
|
|
7bd271ae5b | ||
|
|
8e222fa369 | ||
|
|
b67c068817 | ||
|
|
d778b3d35b | ||
|
|
dc7d876a34 | ||
|
|
f8f3758649 | ||
|
|
db230785d3 | ||
|
|
9fbe514dbd | ||
|
|
139213ef45 | ||
|
|
a8468a6056 | ||
|
|
3e85218142 | ||
|
|
c5cc336847 | ||
|
|
5f33432dc2 | ||
|
|
c83349200c | ||
|
|
1856635927 | ||
|
|
91ce55e547 | ||
|
|
694f4a5895 | ||
|
|
cf233bb497 | ||
|
|
4700590e64 | ||
|
|
1189400167 | ||
|
|
621aa65b91 | ||
|
|
c21876ab40 | ||
|
|
a1173ee712 | ||
|
|
579d240cee | ||
|
|
d7da35ba0b | ||
|
|
d6ec115348 | ||
|
|
3f508e445f | ||
|
|
316bc8cdcc | ||
|
|
d889f32697 | ||
|
|
28af223a9f | ||
|
|
a54dcbe949 | ||
|
|
0b9019d9a2 |
@@ -14,6 +14,20 @@ When the user asks you to create a block:
|
||||
2. Configure all subBlocks with proper types, conditions, and dependencies
|
||||
3. Wire up tools correctly
|
||||
|
||||
## Hard Rule: No Guessed Tool Outputs
|
||||
|
||||
Blocks depend on tool outputs. If the underlying tool response schema is not documented or live-verified, you MUST tell the user instead of guessing block outputs.
|
||||
|
||||
- Do NOT invent block outputs for undocumented tool responses
|
||||
- Do NOT describe unknown JSON shapes as if they were confirmed
|
||||
- Do NOT wire fields into the block just because they seem likely to exist
|
||||
|
||||
If the tool outputs are not known, do one of these instead:
|
||||
1. Ask the user for sample tool responses
|
||||
2. Ask the user for test credentials so the tool responses can be verified
|
||||
3. Limit the block to operations whose outputs are documented
|
||||
4. Leave uncertain outputs out and explicitly tell the user what remains unknown
|
||||
|
||||
## Block Configuration Structure
|
||||
|
||||
```typescript
|
||||
@@ -575,6 +589,8 @@ Use `type: 'json'` with a descriptive string when:
|
||||
- It represents a list/array of items
|
||||
- The shape varies by operation
|
||||
|
||||
If the output shape is unknown because the underlying tool response is undocumented, you MUST tell the user and stop. Unknown is not the same as variable. Never guess block outputs.
|
||||
|
||||
## V2 Block Pattern
|
||||
|
||||
When creating V2 blocks (alongside legacy V1):
|
||||
@@ -829,3 +845,4 @@ After creating the block, you MUST validate it against every tool it references:
|
||||
- Type coercions in `tools.config.params` for any params that need conversion (Number(), Boolean(), JSON.parse())
|
||||
3. **Verify block outputs** cover the key fields returned by all tools
|
||||
4. **Verify conditions** — each subBlock should only show for the operations that actually use it
|
||||
5. **If any tool outputs are still unknown**, explicitly tell the user instead of guessing block outputs
|
||||
|
||||
@@ -15,6 +15,21 @@ When the user asks you to create a connector:
|
||||
3. Create the connector directory and config
|
||||
4. Register it in the connector registry
|
||||
|
||||
## Hard Rule: No Guessed Response Or Document Schemas
|
||||
|
||||
If the service docs do not clearly show the document list response, document fetch response, pagination shape, or metadata fields, you MUST tell the user instead of guessing.
|
||||
|
||||
- Do NOT invent document fields
|
||||
- Do NOT guess pagination cursors or next-page fields
|
||||
- Do NOT infer metadata/tag mappings from unrelated endpoints
|
||||
- Do NOT fabricate `ExternalDocument` content structure from partial docs
|
||||
|
||||
If the source schema is unknown, do one of these instead:
|
||||
1. Ask the user for sample API responses
|
||||
2. Ask the user for test credentials so you can verify live payloads
|
||||
3. Implement only the documented parts of the connector
|
||||
4. Leave the connector incomplete and explicitly say which fields remain unknown
|
||||
|
||||
## Directory Structure
|
||||
|
||||
Create files in `apps/sim/connectors/{service}/`:
|
||||
@@ -92,6 +107,8 @@ export const {service}Connector: ConnectorConfig = {
|
||||
}
|
||||
```
|
||||
|
||||
Only map fields in `listDocuments`, `getDocument`, `validateConfig`, and `mapTags` when the source payload shape is documented or live-verified. If not, tell the user and stop rather than guessing.
|
||||
|
||||
### API key connector example
|
||||
|
||||
```typescript
|
||||
|
||||
@@ -29,6 +29,21 @@ Before writing any code:
|
||||
- Required vs optional parameters
|
||||
- Response structures
|
||||
|
||||
### Hard Rule: No Guessed Response Schemas
|
||||
|
||||
If the official docs do not clearly show the response JSON shape for an endpoint, you MUST stop and tell the user exactly which outputs are unknown.
|
||||
|
||||
- Do NOT guess response field names
|
||||
- Do NOT infer nested JSON paths from related endpoints
|
||||
- Do NOT invent output properties just because they seem likely
|
||||
- Do NOT implement `transformResponse` against unverified payload shapes
|
||||
|
||||
If response schemas are missing or incomplete, do one of the following before proceeding:
|
||||
1. Ask the user for sample responses
|
||||
2. Ask the user for test credentials so you can verify the live payload
|
||||
3. Reduce the scope to only endpoints whose response shapes are documented
|
||||
4. Leave the tool unimplemented and explicitly report why
|
||||
|
||||
## Step 2: Create Tools
|
||||
|
||||
### Directory Structure
|
||||
@@ -103,6 +118,7 @@ export const {service}{Action}Tool: ToolConfig<Params, Response> = {
|
||||
- Set `optional: true` for outputs that may not exist
|
||||
- Never output raw JSON dumps - extract meaningful fields
|
||||
- When using `type: 'json'` and you know the object shape, define `properties` with the inner fields so downstream consumers know the structure. Only use bare `type: 'json'` when the shape is truly dynamic
|
||||
- If you do not know the response JSON shape from docs or verified examples, you MUST tell the user and stop. Never guess outputs or response mappings.
|
||||
|
||||
## Step 3: Create Block
|
||||
|
||||
@@ -450,6 +466,8 @@ If creating V2 versions (API-aligned outputs):
|
||||
- [ ] Verified block subBlocks cover all required tool params with correct conditions
|
||||
- [ ] Verified block outputs match what the tools actually return
|
||||
- [ ] Verified `tools.config.params` correctly maps and coerces all param types
|
||||
- [ ] Verified every tool output and `transformResponse` path against documented or live-verified JSON responses
|
||||
- [ ] If any response schema remained unknown, explicitly told the user instead of guessing
|
||||
|
||||
## Example Command
|
||||
|
||||
|
||||
@@ -14,6 +14,21 @@ When the user asks you to create tools for a service:
|
||||
2. Create the tools directory structure
|
||||
3. Generate properly typed tool configurations
|
||||
|
||||
## Hard Rule: No Guessed Response Schemas
|
||||
|
||||
If the docs do not clearly show the response JSON for a tool, you MUST tell the user exactly which outputs are unknown and stop short of guessing.
|
||||
|
||||
- Do NOT invent response field names
|
||||
- Do NOT infer nested paths from nearby endpoints
|
||||
- Do NOT guess array item shapes
|
||||
- Do NOT write `transformResponse` against unverified payloads
|
||||
|
||||
If the response shape is unknown, do one of these instead:
|
||||
1. Ask the user for sample responses
|
||||
2. Ask the user for test credentials so you can verify live responses
|
||||
3. Implement only the endpoints whose outputs are documented
|
||||
4. Leave the tool unimplemented and explicitly say why
|
||||
|
||||
## Directory Structure
|
||||
|
||||
Create files in `apps/sim/tools/{service}/`:
|
||||
@@ -187,6 +202,8 @@ items: {
|
||||
|
||||
Only use bare `type: 'json'` without `properties` when the shape is truly dynamic.
|
||||
|
||||
If the response shape is unknown because the docs do not provide it, you MUST tell the user and stop. Unknown is not the same as dynamic. Never guess outputs.
|
||||
|
||||
## Critical Rules for transformResponse
|
||||
|
||||
### Handle Nullable Fields
|
||||
@@ -441,7 +458,9 @@ After creating all tools, you MUST validate every tool before finishing:
|
||||
- All output fields match what the API actually returns
|
||||
- No fields are missing from outputs that the API provides
|
||||
- No extra fields are defined in outputs that the API doesn't return
|
||||
- Every output field and JSON path is backed by docs or live-verified sample responses
|
||||
3. **Verify consistency** across tools:
|
||||
- Shared types in `types.ts` match all tools that use them
|
||||
- Tool IDs in the barrel export match the tool file definitions
|
||||
- Error handling is consistent (error checks, meaningful messages)
|
||||
4. **If any response schema is still unknown**, explicitly tell the user instead of guessing
|
||||
|
||||
@@ -14,6 +14,21 @@ You are an expert at creating webhook triggers for Sim. You understand the trigg
|
||||
3. Create a provider handler if custom auth, formatting, or subscriptions are needed
|
||||
4. Register triggers and connect them to the block
|
||||
|
||||
## Hard Rule: No Guessed Webhook Payload Schemas
|
||||
|
||||
If the service docs do not clearly show the webhook payload JSON for an event, you MUST tell the user instead of guessing trigger outputs or `formatInput` mappings.
|
||||
|
||||
- Do NOT invent payload field names
|
||||
- Do NOT guess nested event object paths
|
||||
- Do NOT infer output fields from the UI or marketing docs
|
||||
- Do NOT write `formatInput` against unverified webhook bodies
|
||||
|
||||
If the payload shape is unknown, do one of these instead:
|
||||
1. Ask the user for sample webhook payloads
|
||||
2. Ask the user for a test webhook source so you can inspect a real event
|
||||
3. Implement only the event registration/setup portions whose payloads are documented
|
||||
4. Leave the trigger unimplemented and explicitly say which payload fields are unknown
|
||||
|
||||
## Directory Structure
|
||||
|
||||
```
|
||||
|
||||
25
.agents/skills/cleanup/SKILL.md
Normal file
25
.agents/skills/cleanup/SKILL.md
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
name: cleanup
|
||||
description: Run all code quality skills in sequence — effects, memo, callbacks, state, React Query, and emcn design review
|
||||
---
|
||||
|
||||
# Cleanup
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Steps
|
||||
|
||||
Run each of these skills in order on the specified scope, passing through the scope and fix arguments. After each skill completes, move to the next. Do not skip any.
|
||||
|
||||
1. `/you-might-not-need-an-effect $ARGUMENTS`
|
||||
2. `/you-might-not-need-a-memo $ARGUMENTS`
|
||||
3. `/you-might-not-need-a-callback $ARGUMENTS`
|
||||
4. `/you-might-not-need-state $ARGUMENTS`
|
||||
5. `/react-query-best-practices $ARGUMENTS`
|
||||
6. `/emcn-design-review $ARGUMENTS`
|
||||
|
||||
After all skills have run, output a summary of what was found and fixed (or proposed) across all six passes.
|
||||
335
.agents/skills/emcn-design-review/SKILL.md
Normal file
335
.agents/skills/emcn-design-review/SKILL.md
Normal file
@@ -0,0 +1,335 @@
|
||||
---
|
||||
name: emcn-design-review
|
||||
description: Review UI code for alignment with the emcn design system — components, tokens, patterns, and conventions
|
||||
---
|
||||
|
||||
# EMCN Design Review
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses **emcn**, a custom component library built on Radix UI primitives with CVA (class-variance-authority) variants and CSS variable design tokens. All UI must use emcn components and tokens — never raw HTML elements or hardcoded colors.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the emcn barrel export at `apps/sim/components/emcn/components/index.ts` to know what's available
|
||||
2. Read `apps/sim/app/_styles/globals.css` for the full set of CSS variable tokens
|
||||
3. Analyze the specified scope against every rule below
|
||||
4. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
|
||||
---
|
||||
|
||||
## Imports
|
||||
|
||||
- Import components from `@/components/emcn`, never from subpaths
|
||||
- Import icons from `@/components/emcn/icons` or `lucide-react`
|
||||
- Import `cn` from `@/lib/core/utils/cn` for conditional class merging
|
||||
- Import app-specific wrappers (Select, VerifiedBadge) from `@/components/ui`
|
||||
|
||||
```tsx
|
||||
// Good
|
||||
import { Button, Modal, Badge } from '@/components/emcn'
|
||||
// Bad
|
||||
import { Button } from '@/components/emcn/components/button/button'
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Design Tokens (CSS Variables)
|
||||
|
||||
Never use raw color values. Always use CSS variable tokens via Tailwind arbitrary values: `text-[var(--text-primary)]`, not `text-gray-500` or `#333`. The CSS variable pattern is canonical (1,700+ uses) — do not use Tailwind semantic classes like `text-muted-foreground`.
|
||||
|
||||
### Text hierarchy
|
||||
| Token | Use |
|
||||
|-------|-----|
|
||||
| `text-[var(--text-primary)]` | Main content text |
|
||||
| `text-[var(--text-secondary)]` | Secondary/supporting text |
|
||||
| `text-[var(--text-tertiary)]` | Tertiary text |
|
||||
| `text-[var(--text-muted)]` | Disabled, placeholder text |
|
||||
| `text-[var(--text-icon)]` | Icon tinting |
|
||||
| `text-[var(--text-inverse)]` | Text on dark backgrounds |
|
||||
| `text-[var(--text-error)]` | Error/warning messages |
|
||||
|
||||
### Surfaces (elevation)
|
||||
| Token | Use |
|
||||
|-------|-----|
|
||||
| `bg-[var(--bg)]` | Page background |
|
||||
| `bg-[var(--surface-2)]` through `bg-[var(--surface-7)]` | Increasing elevation |
|
||||
| `bg-[var(--surface-hover)]` | Hover state backgrounds |
|
||||
| `bg-[var(--surface-active)]` | Active/selected backgrounds |
|
||||
|
||||
### Borders
|
||||
| Token | Use |
|
||||
|-------|-----|
|
||||
| `border-[var(--border)]` | Default borders |
|
||||
| `border-[var(--border-1)]` | Stronger borders (inputs, cards) |
|
||||
| `border-[var(--border-muted)]` | Subtle dividers |
|
||||
|
||||
### Status
|
||||
| Token | Use |
|
||||
|-------|-----|
|
||||
| `--success` | Success states |
|
||||
| `--error` | Error states |
|
||||
| `--caution` | Warning states |
|
||||
|
||||
### Brand
|
||||
| Token | Use |
|
||||
|-------|-----|
|
||||
| `--brand-secondary` | Brand color |
|
||||
| `--brand-accent` | Accent/CTA color |
|
||||
|
||||
### Shadows
|
||||
Use shadow tokens, never raw box-shadow values:
|
||||
- `shadow-subtle`, `shadow-medium`, `shadow-overlay`
|
||||
- `shadow-kbd`, `shadow-card`
|
||||
|
||||
### Z-Index
|
||||
Use z-index tokens for layering:
|
||||
- `z-[var(--z-dropdown)]` (100), `z-[var(--z-modal)]` (200), `z-[var(--z-popover)]` (300), `z-[var(--z-tooltip)]` (400), `z-[var(--z-toast)]` (500)
|
||||
|
||||
---
|
||||
|
||||
## Component Usage Rules
|
||||
|
||||
### Buttons
|
||||
Available variants: `default`, `primary`, `destructive`, `ghost`, `outline`, `active`, `secondary`, `tertiary`, `subtle`, `ghost-secondary`, `3d`
|
||||
|
||||
| Action type | Variant | Frequency |
|
||||
|-------------|---------|-----------|
|
||||
| Toolbar, icon-only, utility actions | `ghost` | Most common (28%) |
|
||||
| Primary action (create, save, submit) | `primary` | Very common (24%) |
|
||||
| Cancel, close, secondary action | `default` | Common |
|
||||
| Delete, remove, destructive action | `destructive` | Targeted use only |
|
||||
| Active/selected state | `active` | Targeted use only |
|
||||
| Toggle, mode switch | `outline` | Moderate |
|
||||
|
||||
Sizes: `sm` (compact, 32% of buttons) or `md` (default, used when no size specified). Never create custom button styles — use an existing variant.
|
||||
|
||||
Buttons without an explicit variant prop get `default` styling. This is acceptable for cancel/secondary actions.
|
||||
|
||||
### Modals (Dialogs)
|
||||
Use `Modal` + subcomponents. Never build custom dialog overlays.
|
||||
|
||||
```tsx
|
||||
<Modal open={open} onOpenChange={setOpen}>
|
||||
<ModalContent size="sm">
|
||||
<ModalHeader>Title</ModalHeader>
|
||||
<ModalBody>Content</ModalBody>
|
||||
<ModalFooter>
|
||||
<Button variant="default" onClick={() => setOpen(false)}>Cancel</Button>
|
||||
<Button variant="primary" onClick={handleSubmit}>Save</Button>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
```
|
||||
|
||||
Modal sizes by frequency: `sm` (440px, most common — confirmations and simple dialogs), `md` (500px, forms), `lg` (600px, content-heavy), `xl` (800px, rare), `full` (1200px, rare).
|
||||
|
||||
Footer buttons: Cancel on left (`variant="default"`), primary action on right. This pattern is followed 100% across the codebase.
|
||||
|
||||
### Delete/Remove Confirmations
|
||||
Always use Modal with `size="sm"`. The established pattern:
|
||||
|
||||
```tsx
|
||||
<Modal open={open} onOpenChange={setOpen}>
|
||||
<ModalContent size="sm">
|
||||
<ModalHeader>Delete {itemType}</ModalHeader>
|
||||
<ModalBody>
|
||||
<p>Description of consequences</p>
|
||||
<p className="text-[var(--text-error)]">Warning about irreversibility</p>
|
||||
</ModalBody>
|
||||
<ModalFooter>
|
||||
<Button variant="default" onClick={() => setOpen(false)}>Cancel</Button>
|
||||
<Button variant="destructive" onClick={handleDelete} disabled={isDeleting}>
|
||||
Delete
|
||||
</Button>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
</Modal>
|
||||
```
|
||||
|
||||
Rules:
|
||||
- Title: "Delete {ItemType}" or "Remove {ItemType}" (use "Remove" for membership/association changes)
|
||||
- Include consequence description
|
||||
- Use `text-[var(--text-error)]` for warning text when the action is irreversible
|
||||
- `variant="destructive"` for the action button (100% compliance)
|
||||
- `variant="default"` for cancel (100% compliance)
|
||||
- Cancel left, destructive right (100% compliance)
|
||||
- For high-risk deletes (workspaces), require typing the name to confirm
|
||||
- If the delete is a soft delete, include recovery info: "You can restore it from Recently Deleted in Settings"
|
||||
|
||||
### Toast Notifications
|
||||
Use the imperative `toast` API from `@/components/emcn`. Never build custom notification UI.
|
||||
|
||||
```tsx
|
||||
import { toast } from '@/components/emcn'
|
||||
|
||||
toast.success('Item saved')
|
||||
toast.error('Something went wrong')
|
||||
toast.success('Deleted', { action: { label: 'Undo', onClick: handleUndo } })
|
||||
```
|
||||
|
||||
Variants: `default`, `success`, `error`. Auto-dismiss after 5s. Supports optional action buttons with callbacks.
|
||||
|
||||
### Badges
|
||||
Use semantic color variants for status:
|
||||
|
||||
| Status | Variant | Usage |
|
||||
|--------|---------|-------|
|
||||
| Error, failed, disconnected | `red` | Most common (15 uses) |
|
||||
| Metadata, roles, auth types, scopes | `gray-secondary` | Very common (12 uses) |
|
||||
| Type annotations (TS types, field types) | `type` | Very common (12 uses) |
|
||||
| Success, active, enabled, running | `green` | Common (7 uses) |
|
||||
| Neutral, default, unknown | `gray` | Common (6 uses) |
|
||||
| Outline, parameters, public | `outline` | Moderate (6 uses) |
|
||||
| Warning, processing | `amber` | Moderate (5 uses) |
|
||||
| Paused, warning | `orange` | Occasional |
|
||||
| Info, queued | `blue` | Occasional |
|
||||
| Data types (arrays) | `purple` | Occasional |
|
||||
| Generic with border | `default` | Occasional |
|
||||
|
||||
Use `dot` prop for status indicators (19 instances in codebase). `icon` prop is available but rarely used.
|
||||
|
||||
### Tooltips
|
||||
Use `Tooltip` from emcn with namespace pattern:
|
||||
|
||||
```tsx
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger asChild>
|
||||
<Button variant="ghost">{icon}</Button>
|
||||
</Tooltip.Trigger>
|
||||
<Tooltip.Content>Helpful text</Tooltip.Content>
|
||||
</Tooltip.Root>
|
||||
```
|
||||
|
||||
Use tooltips for icon-only buttons and truncated text. Don't tooltip self-explanatory elements.
|
||||
|
||||
### Popovers
|
||||
Use for filters, option menus, and nested navigation:
|
||||
|
||||
```tsx
|
||||
<Popover open={open} onOpenChange={setOpen} size="sm">
|
||||
<PopoverTrigger asChild>
|
||||
<Button variant="ghost">Trigger</Button>
|
||||
</PopoverTrigger>
|
||||
<PopoverContent side="bottom" align="end" minWidth={160}>
|
||||
<PopoverSection>Section Title</PopoverSection>
|
||||
<PopoverItem active={isActive} onClick={handleClick}>
|
||||
Item Label
|
||||
</PopoverItem>
|
||||
<PopoverDivider />
|
||||
</PopoverContent>
|
||||
</Popover>
|
||||
```
|
||||
|
||||
### Dropdown Menus
|
||||
Use for context menus and action menus:
|
||||
|
||||
```tsx
|
||||
<DropdownMenu>
|
||||
<DropdownMenuTrigger asChild>
|
||||
<Button variant="ghost">
|
||||
<MoreHorizontal className="h-[14px] w-[14px]" />
|
||||
</Button>
|
||||
</DropdownMenuTrigger>
|
||||
<DropdownMenuContent align="end">
|
||||
<DropdownMenuItem onClick={handleEdit}>Edit</DropdownMenuItem>
|
||||
<DropdownMenuSeparator />
|
||||
<DropdownMenuItem onClick={handleDelete} className="text-[var(--text-error)]">
|
||||
Delete
|
||||
</DropdownMenuItem>
|
||||
</DropdownMenuContent>
|
||||
</DropdownMenu>
|
||||
```
|
||||
|
||||
Destructive items go last, after a separator, in error color.
|
||||
|
||||
### Forms
|
||||
Use `FormField` wrapper for labeled inputs:
|
||||
|
||||
```tsx
|
||||
<FormField label="Name" htmlFor="name" error={errors.name} optional>
|
||||
<Input id="name" value={name} onChange={e => setName(e.target.value)} />
|
||||
</FormField>
|
||||
```
|
||||
|
||||
Rules:
|
||||
- Use `Input` from emcn, never raw `<input>` (exception: hidden file inputs)
|
||||
- Use `Textarea` from emcn, never raw `<textarea>`
|
||||
- Use `FormField` for label + input + error layout
|
||||
- Mark optional fields with `optional` prop
|
||||
- Show errors inline below the input
|
||||
- Use `Combobox` for searchable selects
|
||||
- Use `TagInput` for multi-value inputs
|
||||
|
||||
### Loading States
|
||||
Use `Skeleton` for content placeholders:
|
||||
|
||||
```tsx
|
||||
<Skeleton className="h-5 w-[200px] rounded-md" />
|
||||
```
|
||||
|
||||
Rules:
|
||||
- Mirror the actual UI structure with skeletons
|
||||
- Match exact dimensions of the final content
|
||||
- Use `rounded-md` to match component radius
|
||||
- Stack multiple skeletons for lists
|
||||
|
||||
### Icons
|
||||
Standard sizing — `h-[14px] w-[14px]` is the dominant pattern (400+ uses):
|
||||
|
||||
```tsx
|
||||
<Icon className="h-[14px] w-[14px] text-[var(--text-icon)]" />
|
||||
```
|
||||
|
||||
Size scale by frequency:
|
||||
1. `h-[14px] w-[14px]` — default for inline icons (most common)
|
||||
2. `h-[16px] w-[16px]` — slightly larger inline icons
|
||||
3. `h-3 w-3` (12px) — compact/tight spaces
|
||||
4. `h-4 w-4` (16px) — Tailwind equivalent, also common
|
||||
5. `h-3.5 w-3.5` (14px) — Tailwind equivalent of 14px
|
||||
6. `h-5 w-5` (20px) — larger icons, section headers
|
||||
|
||||
Use `text-[var(--text-icon)]` for icon color (113+ uses in codebase).
|
||||
|
||||
---
|
||||
|
||||
## Styling Rules
|
||||
|
||||
1. **Use `cn()` for conditional classes**: `cn('base', condition && 'conditional')` — never template literal concatenation like `` `base ${condition ? 'active' : ''}` ``
|
||||
2. **Inline styles**: Avoid. Exception: dynamic values that can't be expressed as Tailwind classes (e.g., `style={{ width: dynamicVar }}` or CSS variable references). Never use inline styles for colors or static values.
|
||||
3. **Never hardcode colors**: Use CSS variable tokens. Never `text-gray-500`, `bg-red-100`, `#fff`, or `rgb()`. Always `text-[var(--text-*)]`, `bg-[var(--surface-*)]`, etc.
|
||||
4. **Never use Tailwind semantic color classes**: Use `text-[var(--text-muted)]` not `text-muted-foreground`. The CSS variable pattern is canonical.
|
||||
5. **Never use global styles**: Keep all styling local to components
|
||||
6. **Hover states**: Use `hover-hover:` pseudo-class for hover-capable devices
|
||||
7. **Transitions**: Use `transition-colors` for color changes, `transition-colors duration-100` for fast hover
|
||||
8. **Border radius**: `rounded-lg` (large cards), `rounded-md` (medium), `rounded-sm` (small), `rounded-xs` (tiny)
|
||||
9. **Typography**: Use semantic sizes — `text-small` (13px), `text-caption` (12px), `text-xs` (11px), `text-micro` (10px)
|
||||
10. **Font weight**: Use `font-medium` for emphasis, avoid `font-bold` unless for headings
|
||||
11. **Spacing**: Use Tailwind gap/padding utilities. Common patterns: `gap-2`, `gap-3`, `px-4 py-2.5`
|
||||
|
||||
---
|
||||
|
||||
## Anti-patterns to flag
|
||||
|
||||
- Raw HTML `<button>` instead of Button component (exception: inside Radix primitives)
|
||||
- Raw HTML `<input>` instead of Input component (exception: hidden file inputs, read-only checkboxes in markdown)
|
||||
- Hardcoded Tailwind default colors (`text-gray-*`, `bg-red-*`, `text-blue-*`)
|
||||
- Hex values in className (`bg-[#fff]`, `text-[#333]`)
|
||||
- Tailwind semantic classes (`text-muted-foreground`) instead of CSS variables (`text-[var(--text-muted)]`)
|
||||
- Custom modal/dialog implementations instead of `Modal`
|
||||
- Custom toast/notification implementations instead of `toast`
|
||||
- Inline styles for colors or static values (dynamic values are acceptable)
|
||||
- Template literal className concatenation instead of `cn()`
|
||||
- Wrong button variant for the action type
|
||||
- Missing loading/skeleton states
|
||||
- Missing error states on forms
|
||||
- Importing from emcn subpaths instead of barrel export
|
||||
- Using arbitrary z-index (`z-50`, `z-[9999]`) instead of z-index tokens
|
||||
- Custom shadows instead of shadow tokens
|
||||
- Icon sizes that don't follow the established scale (default to `h-[14px] w-[14px]`)
|
||||
54
.agents/skills/react-query-best-practices/SKILL.md
Normal file
54
.agents/skills/react-query-best-practices/SKILL.md
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
name: react-query-best-practices
|
||||
description: Audit React Query usage for best practices — key factories, staleTime, mutations, and server state ownership
|
||||
---
|
||||
|
||||
# React Query Best Practices
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/hooks/queries/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query (TanStack Query) as the single source of truth for all server state. All query hooks live in `hooks/queries/`. Zustand is used only for client-only UI state. Server data must never be duplicated into useState or Zustand outside of mutation callbacks that coordinate cross-store state.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://tkdodo.eu/blog/practical-react-query — foundational defaults, custom hooks, avoiding local state copies
|
||||
2. https://tkdodo.eu/blog/effective-react-query-keys — key factory pattern, hierarchical keys, fuzzy invalidation
|
||||
3. https://tkdodo.eu/blog/react-query-as-a-state-manager — React Query IS your server state manager
|
||||
|
||||
## Rules to enforce
|
||||
|
||||
### Query key factories
|
||||
- Every file in `hooks/queries/` must have a hierarchical key factory with an `all` root key
|
||||
- Keys must include intermediate plural keys (`lists`, `details`) for prefix invalidation
|
||||
- Key factories are colocated with their query hooks, not in a global keys file
|
||||
|
||||
### Query hooks
|
||||
- Every `queryFn` must forward `signal` for request cancellation
|
||||
- Every query must have an explicit `staleTime` (default 0 is almost never correct)
|
||||
- `keepPreviousData` / `placeholderData` only on variable-key queries (where params change), never on static keys
|
||||
- Use `enabled` to prevent queries from running without required params
|
||||
|
||||
### Mutations
|
||||
- Use `onSettled` (not `onSuccess`) for cache reconciliation — it fires on both success and error
|
||||
- For optimistic updates: save previous data in `onMutate`, roll back in `onError`
|
||||
- Use targeted invalidation (`entityKeys.lists()`) not broad (`entityKeys.all`) when possible
|
||||
- Don't include mutation objects in `useCallback` deps — `.mutate()` is stable
|
||||
|
||||
### Server state ownership
|
||||
- Never copy query data into useState. Use query data directly in components.
|
||||
- Never copy query data into Zustand stores (exception: mutation callbacks that coordinate cross-store state like temp ID replacement)
|
||||
- The query cache is not a local state manager — `setQueryData` is for optimistic updates only
|
||||
- Forms are the one deliberate exception: copy server data into local form state with `staleTime: Infinity`
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope against the rules listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
@@ -52,6 +52,20 @@ Fetch the official API docs for the service. This is the **source of truth** for
|
||||
|
||||
Use Context7 (resolve-library-id → query-docs) or WebFetch to retrieve documentation. If both fail, note which claims are based on training knowledge vs verified docs.
|
||||
|
||||
### Hard Rule: No Guessed Source Schemas
|
||||
|
||||
If the service docs do not clearly show document list responses, document fetch responses, metadata fields, or pagination shapes, you MUST tell the user instead of guessing.
|
||||
|
||||
- Do NOT infer document fields from unrelated endpoints
|
||||
- Do NOT guess pagination cursors or response wrappers
|
||||
- Do NOT assume metadata keys that are not documented
|
||||
- Do NOT treat probable shapes as validated
|
||||
|
||||
If a schema is unknown, validation must explicitly recommend:
|
||||
1. sample API responses,
|
||||
2. live test credentials, or
|
||||
3. trimming the connector to only documented fields.
|
||||
|
||||
## Step 3: Validate API Endpoints
|
||||
|
||||
For **every** API call in the connector (`listDocuments`, `getDocument`, `validateConfig`, and any helper functions), verify against the API docs:
|
||||
@@ -93,6 +107,7 @@ For **every** API call in the connector (`listDocuments`, `getDocument`, `valida
|
||||
- [ ] Field names extracted match what the API actually returns
|
||||
- [ ] Nullable fields are handled with `?? null` or `?? undefined` (not `||`, which clobbers valid falsy values like `0` and `''`)
|
||||
- [ ] Error responses are checked before accessing data fields
|
||||
- [ ] Every extracted field and pagination value is backed by official docs or live-verified sample payloads
|
||||
|
||||
## Step 4: Validate OAuth Scopes (if OAuth connector)
|
||||
|
||||
@@ -304,6 +319,7 @@ After fixing, confirm:
|
||||
1. `bun run lint` passes
|
||||
2. TypeScript compiles clean
|
||||
3. Re-read all modified files to verify fixes are correct
|
||||
4. Any remaining unknown source schemas were explicitly reported to the user instead of guessed
|
||||
|
||||
## Checklist Summary
|
||||
|
||||
|
||||
@@ -41,6 +41,20 @@ Fetch the official API docs for the service. This is the **source of truth** for
|
||||
- Pagination patterns (which param name, which response field)
|
||||
- Rate limits and error formats
|
||||
|
||||
### Hard Rule: No Guessed Response Schemas
|
||||
|
||||
If the official docs do not clearly show the response JSON shape for an endpoint, you MUST tell the user instead of guessing.
|
||||
|
||||
- Do NOT assume field names from nearby endpoints
|
||||
- Do NOT infer nested JSON paths without evidence
|
||||
- Do NOT treat "likely" fields as confirmed outputs
|
||||
- Do NOT accept implementation guesses as valid just because they are defensive
|
||||
|
||||
If a response schema is unknown, the validation must explicitly call that out and require:
|
||||
1. sample responses from the user,
|
||||
2. live test credentials for verification, or
|
||||
3. trimming the tool/block down to only documented fields.
|
||||
|
||||
## Step 3: Validate Tools
|
||||
|
||||
For **every** tool file, check:
|
||||
@@ -81,6 +95,7 @@ For **every** tool file, check:
|
||||
- [ ] All optional arrays use `?? []`
|
||||
- [ ] Error cases are handled: checks for missing/empty data and returns meaningful error
|
||||
- [ ] Does NOT do raw JSON dumps — extracts meaningful, individual fields
|
||||
- [ ] Every extracted field is backed by official docs or live-verified sample payloads
|
||||
|
||||
### Outputs
|
||||
- [ ] All output fields match what the API actually returns
|
||||
@@ -267,6 +282,7 @@ After fixing, confirm:
|
||||
1. `bun run lint` passes with no fixes needed
|
||||
2. TypeScript compiles clean (no type errors)
|
||||
3. Re-read all modified files to verify fixes are correct
|
||||
4. Any remaining unknown response schemas were explicitly reported to the user instead of guessed
|
||||
|
||||
## Checklist Summary
|
||||
|
||||
|
||||
@@ -44,6 +44,20 @@ Fetch the service's official webhook documentation. This is the **source of trut
|
||||
- Webhook subscription API (create/delete endpoints, if applicable)
|
||||
- Retry behavior and delivery guarantees
|
||||
|
||||
### Hard Rule: No Guessed Webhook Payload Schemas
|
||||
|
||||
If the official docs do not clearly show the webhook payload JSON for an event, you MUST tell the user instead of guessing.
|
||||
|
||||
- Do NOT invent payload field names
|
||||
- Do NOT infer nested payload paths without evidence
|
||||
- Do NOT treat likely event shapes as verified
|
||||
- Do NOT accept `formatInput` mappings that are not backed by docs or live payloads
|
||||
|
||||
If a payload schema is unknown, validation must explicitly recommend:
|
||||
1. sample webhook payloads,
|
||||
2. a live test webhook source, or
|
||||
3. trimming the trigger to only documented outputs.
|
||||
|
||||
## Step 3: Validate Trigger Definitions
|
||||
|
||||
### utils.ts
|
||||
@@ -93,6 +107,7 @@ Fetch the service's official webhook documentation. This is the **source of trut
|
||||
- [ ] Nested output paths exist at the correct depth (e.g., `resource.id` actually has `resource: { id: ... }`)
|
||||
- [ ] `null` is used for missing optional fields (not empty strings or empty objects)
|
||||
- [ ] Returns `{ input: { ... } }` — not a bare object
|
||||
- [ ] Every mapped payload field is backed by official docs or live-verified webhook payloads
|
||||
|
||||
### Idempotency
|
||||
- [ ] `extractIdempotencyId` returns a stable, unique key per delivery
|
||||
@@ -195,6 +210,7 @@ After fixing, confirm:
|
||||
1. `bun run type-check` passes
|
||||
2. Re-read all modified files to verify fixes are correct
|
||||
3. Provider handler tests pass (if they exist): `bun test {service}`
|
||||
4. Any remaining unknown webhook payload schemas were explicitly reported to the user instead of guessed
|
||||
|
||||
## Checklist Summary
|
||||
|
||||
|
||||
51
.agents/skills/you-might-not-need-a-callback/SKILL.md
Normal file
51
.agents/skills/you-might-not-need-a-callback/SKILL.md
Normal file
@@ -0,0 +1,51 @@
|
||||
---
|
||||
name: you-might-not-need-a-callback
|
||||
description: Analyze and fix useCallback anti-patterns in your code
|
||||
---
|
||||
|
||||
# You Might Not Need a Callback
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://react.dev/reference/react/useCallback — official docs on when useCallback is actually needed
|
||||
|
||||
## When useCallback IS needed
|
||||
|
||||
- Passing a callback to a child wrapped in `React.memo` (to preserve referential equality)
|
||||
- The callback is a dependency of another hook (`useEffect`, `useMemo`)
|
||||
- The callback is used in a custom hook that documents referential stability requirements
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **useCallback on functions not passed as props or deps**: If the function is only called within the same component and isn't in any dependency array, useCallback adds overhead for no benefit. Just declare the function normally.
|
||||
2. **useCallback with exhaustive deps that change every render**: If the dependency array includes values that change on every render, useCallback recalculates every time. The memoization is wasted. Either stabilize the deps (use refs) or remove the useCallback.
|
||||
3. **useCallback on event handlers passed to native elements**: `<button onClick={handleClick}>` — native elements don't benefit from stable references. Only child components wrapped in React.memo do.
|
||||
4. **useCallback wrapping a function that creates new objects/arrays**: If the callback returns `{ ...newObj }` or `[...newArr]`, memoizing the callback doesn't prevent the child from re-rendering due to new return values. The memoization is at the wrong level.
|
||||
5. **useCallback with an empty dep array when deps are needed**: Stale closures — the callback captures outdated values. Either add proper deps or use refs for values that shouldn't trigger re-creation.
|
||||
6. **Pairing useCallback with React.memo unnecessarily**: If the child component is cheap to render, neither useCallback nor React.memo adds value. Only optimize when you've measured a performance problem.
|
||||
7. **useCallback in custom hooks that don't need stable references**: Not every hook return needs to be memoized. Only stabilize callbacks when consumers depend on referential equality.
|
||||
|
||||
## Codebase-specific notes
|
||||
|
||||
This codebase uses a ref pattern for stable callbacks in hooks:
|
||||
```tsx
|
||||
const idRef = useRef(id)
|
||||
useEffect(() => { idRef.current = id }, [id])
|
||||
const fetchData = useCallback(async () => {
|
||||
// use idRef.current instead of id
|
||||
}, []) // empty deps because refs are used
|
||||
```
|
||||
This pattern is correct — don't flag it as an anti-pattern.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
33
.agents/skills/you-might-not-need-a-memo/SKILL.md
Normal file
33
.agents/skills/you-might-not-need-a-memo/SKILL.md
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
name: you-might-not-need-a-memo
|
||||
description: Analyze and fix useMemo/React.memo anti-patterns in your code
|
||||
---
|
||||
|
||||
# You Might Not Need a Memo
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://overreacted.io/before-you-memo/ — two techniques to avoid memo entirely
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **Wrapping a slow component in React.memo when state can be moved down**: If a component re-renders because of state it doesn't use, move that state into a smaller child component instead of memoizing. The slow component stops re-rendering without memo.
|
||||
2. **Wrapping in React.memo when children can be lifted up**: If a parent owns state that changes frequently, extract the stateful part and pass the expensive subtree as `children`. Children passed as props don't re-render when the parent's state changes.
|
||||
3. **useMemo on cheap computations**: Filtering or mapping a small array, string concatenation, simple arithmetic — these don't need memoization. Only memoize when you've measured a performance problem.
|
||||
4. **useMemo with constantly-changing deps**: If the dependency array changes on every render, useMemo does nothing — it recalculates every time. Fix the deps or remove the memo.
|
||||
5. **useMemo to create objects/arrays passed as props**: Instead of memoizing to prevent child re-renders, consider whether the child even needs referential stability. If the child doesn't use React.memo or pass it to a dep array, the memo is wasted.
|
||||
6. **React.memo on components that always receive new props**: If the parent always passes new objects, arrays, or callbacks, React.memo's shallow comparison always fails. Fix the parent instead of memoizing the child.
|
||||
7. **useMemo for derived state**: If you're computing a value from props or state, just compute it inline during render. React renders are fast. `const fullName = first + ' ' + last` doesn't need useMemo.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above to understand the two core techniques (move state down, lift content up)
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
38
.agents/skills/you-might-not-need-state/SKILL.md
Normal file
38
.agents/skills/you-might-not-need-state/SKILL.md
Normal file
@@ -0,0 +1,38 @@
|
||||
---
|
||||
name: you-might-not-need-state
|
||||
description: Analyze and fix unnecessary useState, derived state, and server-state-in-local-state anti-patterns
|
||||
---
|
||||
|
||||
# You Might Not Need State
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query for all server state and Zustand for client-only global state. useState should only be used for ephemeral UI concerns (open/closed, hover, local form input). Server data should never be copied into useState or Zustand — React Query is the single source of truth.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://react.dev/learn/choosing-the-state-structure — 5 principles for structuring state
|
||||
2. https://tkdodo.eu/blog/dont-over-use-state — never store derived/computed values in state
|
||||
3. https://tkdodo.eu/blog/putting-props-to-use-state — never mirror props into state via useEffect
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **Derived state stored in useState**: If a value can be computed from props, other state, or query data, compute it inline during render instead of storing it in state.
|
||||
2. **Server state copied into useState**: Never `useState` + `useEffect` to sync React Query data into local state. Use query data directly. The only exception is forms where users edit server data.
|
||||
3. **Props mirrored into state**: Never `useState(prop)` + `useEffect(() => setState(prop))`. Use the prop directly, or use a key to reset component state.
|
||||
4. **Chained useEffect state updates**: Never chain Effects that set state to trigger other Effects. Calculate all derived values in the event handler or inline during render.
|
||||
5. **Storing objects when an ID suffices**: Store `selectedId` not a copy of the selected object. Derive the object: `items.find(i => i.id === selectedId)`.
|
||||
6. **State that duplicates Zustand or React Query**: If the data already lives in a store or query cache, don't create a parallel useState.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
@@ -1,17 +1,17 @@
|
||||
---
|
||||
description: Create webhook triggers for a Sim integration using the generic trigger builder
|
||||
description: Create webhook or polling triggers for a Sim integration
|
||||
argument-hint: <service-name>
|
||||
---
|
||||
|
||||
# Add Trigger
|
||||
|
||||
You are an expert at creating webhook triggers for Sim. You understand the trigger system, the generic `buildTriggerSubBlocks` helper, and how triggers connect to blocks.
|
||||
You are an expert at creating webhook and polling triggers for Sim. You understand the trigger system, the generic `buildTriggerSubBlocks` helper, polling infrastructure, and how triggers connect to blocks.
|
||||
|
||||
## Your Task
|
||||
|
||||
1. Research what webhook events the service supports
|
||||
2. Create the trigger files using the generic builder
|
||||
3. Create a provider handler if custom auth, formatting, or subscriptions are needed
|
||||
1. Research what webhook events the service supports — if the service lacks reliable webhooks, use polling
|
||||
2. Create the trigger files using the generic builder (webhook) or manual config (polling)
|
||||
3. Create a provider handler (webhook) or polling handler (polling)
|
||||
4. Register triggers and connect them to the block
|
||||
|
||||
## Directory Structure
|
||||
@@ -146,23 +146,37 @@ export const TRIGGER_REGISTRY: TriggerRegistry = {
|
||||
|
||||
### Block file (`apps/sim/blocks/blocks/{service}.ts`)
|
||||
|
||||
Wire triggers into the block so the trigger UI appears and `generate-docs.ts` discovers them. Two changes are needed:
|
||||
|
||||
1. **Spread trigger subBlocks** at the end of the block's `subBlocks` array
|
||||
2. **Add `triggers` property** after `outputs` with `enabled: true` and `available: [...]`
|
||||
|
||||
```typescript
|
||||
import { getTrigger } from '@/triggers'
|
||||
|
||||
export const {Service}Block: BlockConfig = {
|
||||
// ...
|
||||
triggers: {
|
||||
enabled: true,
|
||||
available: ['{service}_event_a', '{service}_event_b'],
|
||||
},
|
||||
subBlocks: [
|
||||
// Regular tool subBlocks first...
|
||||
...getTrigger('{service}_event_a').subBlocks,
|
||||
...getTrigger('{service}_event_b').subBlocks,
|
||||
],
|
||||
// ... tools, inputs, outputs ...
|
||||
triggers: {
|
||||
enabled: true,
|
||||
available: ['{service}_event_a', '{service}_event_b'],
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Versioned blocks (V1 + V2):** Many integrations have a hidden V1 block and a visible V2 block. Where you add the trigger wiring depends on how V2 inherits from V1:
|
||||
|
||||
- **V2 uses `...V1Block` spread** (e.g., Google Calendar): Add trigger to V1 — V2 inherits both `subBlocks` and `triggers` automatically.
|
||||
- **V2 defines its own `subBlocks`** (e.g., Google Sheets): Add trigger to V2 (the visible block). V1 is hidden and doesn't need it.
|
||||
- **Single block, no V2** (e.g., Google Drive): Add trigger directly.
|
||||
|
||||
`generate-docs.ts` deduplicates by base type (first match wins). If V1 is processed first without triggers, the V2 triggers won't appear in `integrations.json`. Always verify by checking the output after running the script.
|
||||
|
||||
## Provider Handler
|
||||
|
||||
All provider-specific webhook logic lives in a single handler file: `apps/sim/lib/webhooks/providers/{service}.ts`.
|
||||
@@ -327,6 +341,121 @@ export function buildOutputs(): Record<string, TriggerOutput> {
|
||||
}
|
||||
```
|
||||
|
||||
## Polling Triggers
|
||||
|
||||
Use polling when the service lacks reliable webhooks (e.g., Google Sheets, Google Drive, Google Calendar, Gmail, RSS, IMAP). Polling triggers do NOT use `buildTriggerSubBlocks` — they define subBlocks manually.
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
apps/sim/triggers/{service}/
|
||||
├── index.ts # Barrel export
|
||||
└── poller.ts # TriggerConfig with polling: true
|
||||
|
||||
apps/sim/lib/webhooks/polling/
|
||||
└── {service}.ts # PollingProviderHandler implementation
|
||||
```
|
||||
|
||||
### Polling Handler (`apps/sim/lib/webhooks/polling/{service}.ts`)
|
||||
|
||||
```typescript
|
||||
import { pollingIdempotency } from '@/lib/core/idempotency/service'
|
||||
import type { PollingProviderHandler, PollWebhookContext } from '@/lib/webhooks/polling/types'
|
||||
import { markWebhookFailed, markWebhookSuccess, resolveOAuthCredential, updateWebhookProviderConfig } from '@/lib/webhooks/polling/utils'
|
||||
import { processPolledWebhookEvent } from '@/lib/webhooks/processor'
|
||||
|
||||
export const {service}PollingHandler: PollingProviderHandler = {
|
||||
provider: '{service}',
|
||||
label: '{Service}',
|
||||
|
||||
async pollWebhook(ctx: PollWebhookContext): Promise<'success' | 'failure'> {
|
||||
const { webhookData, workflowData, requestId, logger } = ctx
|
||||
const webhookId = webhookData.id
|
||||
|
||||
try {
|
||||
// For OAuth services:
|
||||
const accessToken = await resolveOAuthCredential(webhookData, '{service}', requestId, logger)
|
||||
const config = webhookData.providerConfig as unknown as {Service}WebhookConfig
|
||||
|
||||
// First poll: seed state, emit nothing
|
||||
if (!config.lastCheckedTimestamp) {
|
||||
await updateWebhookProviderConfig(webhookId, { lastCheckedTimestamp: new Date().toISOString() }, logger)
|
||||
await markWebhookSuccess(webhookId, logger)
|
||||
return 'success'
|
||||
}
|
||||
|
||||
// Fetch changes since last poll, process with idempotency
|
||||
// ...
|
||||
|
||||
await markWebhookSuccess(webhookId, logger)
|
||||
return 'success'
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error processing {service} webhook ${webhookId}:`, error)
|
||||
await markWebhookFailed(webhookId, logger)
|
||||
return 'failure'
|
||||
}
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Key patterns:**
|
||||
- First poll seeds state and emits nothing (avoids flooding with existing data)
|
||||
- Use `pollingIdempotency.executeWithIdempotency(provider, key, callback)` for dedup
|
||||
- Use `processPolledWebhookEvent(webhookData, workflowData, payload, requestId)` to fire the workflow
|
||||
- Use `updateWebhookProviderConfig(webhookId, partialConfig, logger)` for read-merge-write on state
|
||||
- Use the latest server-side timestamp from API responses (not wall clock) to avoid clock skew
|
||||
|
||||
### Trigger Config (`apps/sim/triggers/{service}/poller.ts`)
|
||||
|
||||
```typescript
|
||||
import { {Service}Icon } from '@/components/icons'
|
||||
import type { TriggerConfig } from '@/triggers/types'
|
||||
|
||||
export const {service}PollingTrigger: TriggerConfig = {
|
||||
id: '{service}_poller',
|
||||
name: '{Service} Trigger',
|
||||
provider: '{service}',
|
||||
description: 'Triggers when ...',
|
||||
version: '1.0.0',
|
||||
icon: {Service}Icon,
|
||||
polling: true, // REQUIRED — routes to polling infrastructure
|
||||
|
||||
subBlocks: [
|
||||
{ id: 'triggerCredentials', type: 'oauth-input', title: 'Credentials', serviceId: '{service}', requiredScopes: [], required: true, mode: 'trigger', supportsCredentialSets: true },
|
||||
// ... service-specific config fields (dropdowns, inputs, switches) ...
|
||||
{ id: 'triggerInstructions', type: 'text', title: 'Setup Instructions', hideFromPreview: true, mode: 'trigger', defaultValue: '...' },
|
||||
],
|
||||
|
||||
outputs: {
|
||||
// Must match the payload shape from processPolledWebhookEvent
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
### Registration (3 places)
|
||||
|
||||
1. **`apps/sim/triggers/constants.ts`** — add provider to `POLLING_PROVIDERS` Set
|
||||
2. **`apps/sim/lib/webhooks/polling/registry.ts`** — import handler, add to `POLLING_HANDLERS`
|
||||
3. **`apps/sim/triggers/registry.ts`** — import trigger config, add to `TRIGGER_REGISTRY`
|
||||
|
||||
### Helm Cron Job
|
||||
|
||||
Add to `helm/sim/values.yaml` under the existing polling cron jobs:
|
||||
|
||||
```yaml
|
||||
{service}WebhookPoll:
|
||||
schedule: "*/1 * * * *"
|
||||
concurrencyPolicy: Forbid
|
||||
url: "http://sim:3000/api/webhooks/poll/{service}"
|
||||
```
|
||||
|
||||
### Reference Implementations
|
||||
|
||||
- Simple: `apps/sim/lib/webhooks/polling/rss.ts` + `apps/sim/triggers/rss/poller.ts`
|
||||
- Complex (OAuth, attachments): `apps/sim/lib/webhooks/polling/gmail.ts` + `apps/sim/triggers/gmail/poller.ts`
|
||||
- Cursor-based (changes API): `apps/sim/lib/webhooks/polling/google-drive.ts`
|
||||
- Timestamp-based: `apps/sim/lib/webhooks/polling/google-calendar.ts`
|
||||
|
||||
## Checklist
|
||||
|
||||
### Trigger Definition
|
||||
@@ -352,7 +481,17 @@ export function buildOutputs(): Record<string, TriggerOutput> {
|
||||
- [ ] NO changes to `route.ts`, `provider-subscriptions.ts`, or `deploy.ts`
|
||||
- [ ] API key field uses `password: true`
|
||||
|
||||
### Polling Trigger (if applicable)
|
||||
- [ ] Handler implements `PollingProviderHandler` at `lib/webhooks/polling/{service}.ts`
|
||||
- [ ] Trigger config has `polling: true` and defines subBlocks manually (no `buildTriggerSubBlocks`)
|
||||
- [ ] Provider string matches across: trigger config, handler, `POLLING_PROVIDERS`, polling registry
|
||||
- [ ] First poll seeds state and emits nothing
|
||||
- [ ] Added provider to `POLLING_PROVIDERS` in `triggers/constants.ts`
|
||||
- [ ] Added handler to `POLLING_HANDLERS` in `lib/webhooks/polling/registry.ts`
|
||||
- [ ] Added cron job to `helm/sim/values.yaml`
|
||||
- [ ] Payload shape matches trigger `outputs` schema
|
||||
|
||||
### Testing
|
||||
- [ ] `bun run type-check` passes
|
||||
- [ ] Manually verify `formatInput` output keys match trigger `outputs` keys
|
||||
- [ ] Manually verify output keys match trigger `outputs` keys
|
||||
- [ ] Trigger UI shows correctly in the block
|
||||
|
||||
25
.claude/commands/cleanup.md
Normal file
25
.claude/commands/cleanup.md
Normal file
@@ -0,0 +1,25 @@
|
||||
---
|
||||
description: Run all code quality skills in sequence — effects, memo, callbacks, state, React Query, and emcn design review
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# Cleanup
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Steps
|
||||
|
||||
Run each of these skills in order on the specified scope, passing through the scope and fix arguments. After each skill completes, move to the next. Do not skip any.
|
||||
|
||||
1. `/you-might-not-need-an-effect $ARGUMENTS`
|
||||
2. `/you-might-not-need-a-memo $ARGUMENTS`
|
||||
3. `/you-might-not-need-a-callback $ARGUMENTS`
|
||||
4. `/you-might-not-need-state $ARGUMENTS`
|
||||
5. `/react-query-best-practices $ARGUMENTS`
|
||||
6. `/emcn-design-review $ARGUMENTS`
|
||||
|
||||
After all skills have run, output a summary of what was found and fixed (or proposed) across all six passes.
|
||||
79
.claude/commands/emcn-design-review.md
Normal file
79
.claude/commands/emcn-design-review.md
Normal file
@@ -0,0 +1,79 @@
|
||||
---
|
||||
description: Review UI code for alignment with the emcn design system — components, tokens, patterns, and conventions
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# EMCN Design Review
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses **emcn**, a custom component library built on Radix UI primitives with CVA variants and CSS variable design tokens. All UI must use emcn components and tokens.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the emcn barrel export at `apps/sim/components/emcn/components/index.ts` to know what's available
|
||||
2. Read `apps/sim/app/_styles/globals.css` for CSS variable tokens
|
||||
3. Analyze the specified scope against every rule below
|
||||
4. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
|
||||
---
|
||||
|
||||
## Imports
|
||||
|
||||
- Import from `@/components/emcn` barrel, never subpaths
|
||||
- Icons from `@/components/emcn/icons` or `lucide-react`
|
||||
- Use `cn` from `@/lib/core/utils/cn` for conditional classes
|
||||
|
||||
## Design Tokens
|
||||
|
||||
Use the CSS variable pattern (`text-[var(--text-primary)]`), never Tailwind semantic classes (`text-muted-foreground`) or hardcoded colors (`text-gray-500`, `#333`).
|
||||
|
||||
**Text**: `--text-primary`, `--text-secondary`, `--text-tertiary`, `--text-muted`, `--text-icon`, `--text-inverse`, `--text-error`
|
||||
**Surfaces**: `--bg`, `--surface-2` through `--surface-7`, `--surface-hover`, `--surface-active`
|
||||
**Borders**: `--border`, `--border-1`, `--border-muted`
|
||||
**Z-Index**: `--z-dropdown` (100), `--z-modal` (200), `--z-popover` (300), `--z-tooltip` (400), `--z-toast` (500)
|
||||
**Shadows**: `shadow-subtle`, `shadow-medium`, `shadow-overlay`, `shadow-card`
|
||||
|
||||
## Buttons
|
||||
|
||||
| Action | Variant |
|
||||
|--------|---------|
|
||||
| Toolbar, icon-only | `ghost` (most common, 28%) |
|
||||
| Create, save, submit | `primary` (24%) |
|
||||
| Cancel, close | `default` |
|
||||
| Delete, remove | `destructive` |
|
||||
| Selected state | `active` |
|
||||
| Toggle | `outline` |
|
||||
|
||||
## Delete/Remove Confirmations
|
||||
|
||||
Modal `size="sm"`, title "Delete/Remove {ItemType}", `variant="destructive"` action button, `variant="default"` cancel. Cancel left, action right (100% compliance). Use `text-[var(--text-error)]` for irreversible warnings.
|
||||
|
||||
## Toast
|
||||
|
||||
`toast.success()`, `toast.error()`, `toast()` from `@/components/emcn`. Never custom notification UI.
|
||||
|
||||
## Badges
|
||||
|
||||
`red`=error/failed, `gray-secondary`=metadata/roles, `type`=type annotations, `green`=success/active, `gray`=neutral, `amber`=processing, `orange`=paused, `blue`=info. Use `dot` prop for status indicators.
|
||||
|
||||
## Icons
|
||||
|
||||
Default: `h-[14px] w-[14px]` (400+ uses). Color: `text-[var(--text-icon)]`. Size by usage frequency (most to least common): 14px > 16px > 12px > 20px.
|
||||
|
||||
## Anti-patterns to flag
|
||||
|
||||
- Raw `<button>`/`<input>` instead of emcn components
|
||||
- Hardcoded colors (`text-gray-*`, `#hex`, `rgb()`)
|
||||
- Tailwind semantics (`text-muted-foreground`) instead of CSS variables
|
||||
- Template literal className instead of `cn()`
|
||||
- Inline styles for colors/static values (dynamic values OK)
|
||||
- Importing from emcn subpaths instead of barrel
|
||||
- Arbitrary z-index instead of tokens
|
||||
- Wrong button variant for action type
|
||||
54
.claude/commands/react-query-best-practices.md
Normal file
54
.claude/commands/react-query-best-practices.md
Normal file
@@ -0,0 +1,54 @@
|
||||
---
|
||||
description: Audit React Query usage for best practices — key factories, staleTime, mutations, and server state ownership
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# React Query Best Practices
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/hooks/queries/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query (TanStack Query) as the single source of truth for all server state. All query hooks live in `hooks/queries/`. Zustand is used only for client-only UI state. Server data must never be duplicated into useState or Zustand outside of mutation callbacks that coordinate cross-store state.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://tkdodo.eu/blog/practical-react-query — foundational defaults, custom hooks, avoiding local state copies
|
||||
2. https://tkdodo.eu/blog/effective-react-query-keys — key factory pattern, hierarchical keys, fuzzy invalidation
|
||||
3. https://tkdodo.eu/blog/react-query-as-a-state-manager — React Query IS your server state manager
|
||||
|
||||
## Rules to enforce
|
||||
|
||||
### Query key factories
|
||||
- Every file in `hooks/queries/` must have a hierarchical key factory with an `all` root key
|
||||
- Keys must include intermediate plural keys (`lists`, `details`) for prefix invalidation
|
||||
- Key factories are colocated with their query hooks, not in a global keys file
|
||||
|
||||
### Query hooks
|
||||
- Every `queryFn` must forward `signal` for request cancellation
|
||||
- Every query must have an explicit `staleTime` (default 0 is almost never correct)
|
||||
- `keepPreviousData` / `placeholderData` only on variable-key queries (where params change), never on static keys
|
||||
- Use `enabled` to prevent queries from running without required params
|
||||
|
||||
### Mutations
|
||||
- Use `onSettled` (not `onSuccess`) for cache reconciliation — it fires on both success and error
|
||||
- For optimistic updates: save previous data in `onMutate`, roll back in `onError`
|
||||
- Use targeted invalidation (`entityKeys.lists()`) not broad (`entityKeys.all`) when possible
|
||||
- Don't include mutation objects in `useCallback` deps — `.mutate()` is stable
|
||||
|
||||
### Server state ownership
|
||||
- Never copy query data into useState. Use query data directly in components.
|
||||
- Never copy query data into Zustand stores (exception: mutation callbacks that coordinate cross-store state like temp ID replacement)
|
||||
- The query cache is not a local state manager — `setQueryData` is for optimistic updates only
|
||||
- Forms are the one deliberate exception: copy server data into local form state with `staleTime: Infinity`
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope against the rules listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
35
.claude/commands/you-might-not-need-a-callback.md
Normal file
35
.claude/commands/you-might-not-need-a-callback.md
Normal file
@@ -0,0 +1,35 @@
|
||||
---
|
||||
description: Analyze and fix useCallback anti-patterns in your code
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# You Might Not Need a Callback
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://react.dev/reference/react/useCallback — official docs on when useCallback is actually needed
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **useCallback on functions not passed as props or deps**: No benefit if only called within the same component.
|
||||
2. **useCallback with deps that change every render**: Memoization is wasted.
|
||||
3. **useCallback on handlers passed to native elements**: `<button onClick={fn}>` doesn't benefit from stable references.
|
||||
4. **useCallback wrapping functions that return new objects/arrays**: Memoization at the wrong level.
|
||||
5. **useCallback with empty deps when deps are needed**: Stale closures.
|
||||
6. **Pairing useCallback + React.memo unnecessarily**: Only optimize when you've measured a problem.
|
||||
7. **useCallback in hooks that don't need stable references**: Not every hook return needs memoization.
|
||||
|
||||
Note: This codebase uses a ref pattern for stable callbacks (`useRef` + empty deps). That pattern is correct — don't flag it.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
33
.claude/commands/you-might-not-need-a-memo.md
Normal file
33
.claude/commands/you-might-not-need-a-memo.md
Normal file
@@ -0,0 +1,33 @@
|
||||
---
|
||||
description: Analyze and fix useMemo/React.memo anti-patterns in your code
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# You Might Not Need a Memo
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://overreacted.io/before-you-memo/ — two techniques to avoid memo entirely
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **State can be moved down instead of memoizing**: Move state into a smaller child so the slow component stops re-rendering without memo.
|
||||
2. **Children can be lifted up**: Extract stateful part, pass expensive subtree as `children` — children as props don't re-render when parent state changes.
|
||||
3. **useMemo on cheap computations**: Small array filters, string concat, arithmetic don't need memoization.
|
||||
4. **useMemo with constantly-changing deps**: Deps change every render = useMemo does nothing.
|
||||
5. **useMemo to stabilize props for non-memoized children**: If the child isn't wrapped in React.memo, stable references don't matter.
|
||||
6. **React.memo on components that always receive new props**: Fix the parent instead.
|
||||
7. **useMemo for derived state**: Just compute inline during render.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
38
.claude/commands/you-might-not-need-state.md
Normal file
38
.claude/commands/you-might-not-need-state.md
Normal file
@@ -0,0 +1,38 @@
|
||||
---
|
||||
description: Analyze and fix unnecessary useState, derived state, and server-state-in-local-state anti-patterns
|
||||
argument-hint: [scope] [fix=true|false]
|
||||
---
|
||||
|
||||
# You Might Not Need State
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query for all server state and Zustand for client-only global state. useState should only be used for ephemeral UI concerns (open/closed, hover, local form input). Server data should never be copied into useState or Zustand — React Query is the single source of truth.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://react.dev/learn/choosing-the-state-structure — 5 principles for structuring state
|
||||
2. https://tkdodo.eu/blog/dont-over-use-state — never store derived/computed values in state
|
||||
3. https://tkdodo.eu/blog/putting-props-to-use-state — never mirror props into state via useEffect
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **Derived state stored in useState**: If a value can be computed from props, other state, or query data, compute it inline during render instead of storing it in state.
|
||||
2. **Server state copied into useState**: Never `useState` + `useEffect` to sync React Query data into local state. Use query data directly. The only exception is forms where users edit server data.
|
||||
3. **Props mirrored into state**: Never `useState(prop)` + `useEffect(() => setState(prop))`. Use the prop directly, or use a key to reset component state.
|
||||
4. **Chained useEffect state updates**: Never chain Effects that set state to trigger other Effects. Calculate all derived values in the event handler or inline during render.
|
||||
5. **Storing objects when an ID suffices**: Store `selectedId` not a copy of the selected object. Derive the object: `items.find(i => i.id === selectedId)`.
|
||||
6. **State that duplicates Zustand or React Query**: If the data already lives in a store or query cache, don't create a parallel useState.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
71
.claude/rules/constitution.md
Normal file
71
.claude/rules/constitution.md
Normal file
@@ -0,0 +1,71 @@
|
||||
# Sim — Language & Positioning
|
||||
|
||||
When editing user-facing copy (landing pages, docs, metadata, marketing), follow these rules.
|
||||
|
||||
## Identity
|
||||
|
||||
Sim is the **AI workspace** where teams build and run AI agents. Not a workflow tool, not an agent framework, not an automation platform.
|
||||
|
||||
**Short definition:** Sim is the open-source AI workspace where teams build, deploy, and manage AI agents.
|
||||
|
||||
**Full definition:** Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code.
|
||||
|
||||
## Audience
|
||||
|
||||
**Primary:** Teams building AI agents for their organization — IT, operations, and technical teams who need governance, security, lifecycle management, and collaboration.
|
||||
|
||||
**Secondary:** Individual builders and developers who care about speed, flexibility, and open source.
|
||||
|
||||
## Required Language
|
||||
|
||||
| Concept | Use | Never use |
|
||||
|---------|-----|-----------|
|
||||
| The product | "AI workspace" | "workflow tool", "automation platform", "agent framework" |
|
||||
| Building | "build agents", "create agents" | "create workflows" (unless describing the workflow module specifically) |
|
||||
| Visual builder | "workflow builder" or "visual builder" | "canvas", "graph editor" |
|
||||
| Mothership | "Mothership" (capitalized) | "chat", "AI assistant", "copilot" |
|
||||
| Deployment | "deploy", "ship" | "publish", "activate" |
|
||||
| Audience | "teams", "builders" | "users", "customers" (in marketing copy) |
|
||||
| What agents do | "automate real work" | "automate tasks", "automate workflows" |
|
||||
| Our advantage | "open-source AI workspace" | "open-source platform" |
|
||||
|
||||
## Tone
|
||||
|
||||
- **Direct.** Short sentences. Active voice. Lead with what it does.
|
||||
- **Concrete.** Name specific things — "Slack bots, compliance agents, data pipelines" — not abstractions.
|
||||
- **Confident, not loud.** No exclamation marks or superlatives.
|
||||
- **Simple.** If a 16-year-old can't understand the sentence, rewrite it.
|
||||
|
||||
## Claim Hierarchy
|
||||
|
||||
When describing Sim, always lead with the most differentiated claim:
|
||||
|
||||
1. **What it is:** "The AI workspace for teams"
|
||||
2. **What you do:** "Build, deploy, and manage AI agents"
|
||||
3. **How:** "Visually, conversationally, or with code"
|
||||
4. **Scale:** "1,000+ integrations, every major LLM"
|
||||
5. **Trust:** "Open source. SOC2. Trusted by 100,000+ builders."
|
||||
|
||||
## Module Descriptions
|
||||
|
||||
| Module | One-liner |
|
||||
|--------|-----------|
|
||||
| **Mothership** | Your AI command center. Build and manage everything in natural language. |
|
||||
| **Workflows** | The visual builder. Connect blocks, models, and integrations into agent logic. |
|
||||
| **Knowledge Base** | Your agents' memory. Upload docs, sync sources, build vector databases. |
|
||||
| **Tables** | A database, built in. Store, query, and wire structured data into agent runs. |
|
||||
| **Files** | Upload, create, and share. One store for your team and every agent. |
|
||||
| **Logs** | Full visibility, every run. Trace execution block by block. |
|
||||
|
||||
## What We Never Say
|
||||
|
||||
- Never call Sim "just a workflow tool"
|
||||
- Never compare only on integration count — we win on AI-native capabilities
|
||||
- Never use "no-code" as the primary descriptor — say "visually, conversationally, or with code"
|
||||
- Never promise unshipped features
|
||||
- Never use jargon ("RAG", "vector database", "MCP") without plain-English explanation on public pages
|
||||
- Avoid "agentic workforce" as a primary term — use "AI agents"
|
||||
|
||||
## Vision
|
||||
|
||||
Sim becomes the default environment where teams build AI agents — not a tool you visit for one task, but a workspace you live in. Workflows are one module; Mothership is another. The workspace is the constant; the interface adapts.
|
||||
@@ -1,12 +1,12 @@
|
||||
# Add Trigger
|
||||
|
||||
You are an expert at creating webhook triggers for Sim. You understand the trigger system, the generic `buildTriggerSubBlocks` helper, and how triggers connect to blocks.
|
||||
You are an expert at creating webhook and polling triggers for Sim. You understand the trigger system, the generic `buildTriggerSubBlocks` helper, polling infrastructure, and how triggers connect to blocks.
|
||||
|
||||
## Your Task
|
||||
|
||||
1. Research what webhook events the service supports
|
||||
2. Create the trigger files using the generic builder
|
||||
3. Create a provider handler if custom auth, formatting, or subscriptions are needed
|
||||
1. Research what webhook events the service supports — if the service lacks reliable webhooks, use polling
|
||||
2. Create the trigger files using the generic builder (webhook) or manual config (polling)
|
||||
3. Create a provider handler (webhook) or polling handler (polling)
|
||||
4. Register triggers and connect them to the block
|
||||
|
||||
## Directory Structure
|
||||
@@ -141,23 +141,37 @@ export const TRIGGER_REGISTRY: TriggerRegistry = {
|
||||
|
||||
### Block file (`apps/sim/blocks/blocks/{service}.ts`)
|
||||
|
||||
Wire triggers into the block so the trigger UI appears and `generate-docs.ts` discovers them. Two changes are needed:
|
||||
|
||||
1. **Spread trigger subBlocks** at the end of the block's `subBlocks` array
|
||||
2. **Add `triggers` property** after `outputs` with `enabled: true` and `available: [...]`
|
||||
|
||||
```typescript
|
||||
import { getTrigger } from '@/triggers'
|
||||
|
||||
export const {Service}Block: BlockConfig = {
|
||||
// ...
|
||||
triggers: {
|
||||
enabled: true,
|
||||
available: ['{service}_event_a', '{service}_event_b'],
|
||||
},
|
||||
subBlocks: [
|
||||
// Regular tool subBlocks first...
|
||||
...getTrigger('{service}_event_a').subBlocks,
|
||||
...getTrigger('{service}_event_b').subBlocks,
|
||||
],
|
||||
// ... tools, inputs, outputs ...
|
||||
triggers: {
|
||||
enabled: true,
|
||||
available: ['{service}_event_a', '{service}_event_b'],
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Versioned blocks (V1 + V2):** Many integrations have a hidden V1 block and a visible V2 block. Where you add the trigger wiring depends on how V2 inherits from V1:
|
||||
|
||||
- **V2 uses `...V1Block` spread** (e.g., Google Calendar): Add trigger to V1 — V2 inherits both `subBlocks` and `triggers` automatically.
|
||||
- **V2 defines its own `subBlocks`** (e.g., Google Sheets): Add trigger to V2 (the visible block). V1 is hidden and doesn't need it.
|
||||
- **Single block, no V2** (e.g., Google Drive): Add trigger directly.
|
||||
|
||||
`generate-docs.ts` deduplicates by base type (first match wins). If V1 is processed first without triggers, the V2 triggers won't appear in `integrations.json`. Always verify by checking the output after running the script.
|
||||
|
||||
## Provider Handler
|
||||
|
||||
All provider-specific webhook logic lives in a single handler file: `apps/sim/lib/webhooks/providers/{service}.ts`.
|
||||
@@ -322,6 +336,121 @@ export function buildOutputs(): Record<string, TriggerOutput> {
|
||||
}
|
||||
```
|
||||
|
||||
## Polling Triggers
|
||||
|
||||
Use polling when the service lacks reliable webhooks (e.g., Google Sheets, Google Drive, Google Calendar, Gmail, RSS, IMAP). Polling triggers do NOT use `buildTriggerSubBlocks` — they define subBlocks manually.
|
||||
|
||||
### Directory Structure
|
||||
|
||||
```
|
||||
apps/sim/triggers/{service}/
|
||||
├── index.ts # Barrel export
|
||||
└── poller.ts # TriggerConfig with polling: true
|
||||
|
||||
apps/sim/lib/webhooks/polling/
|
||||
└── {service}.ts # PollingProviderHandler implementation
|
||||
```
|
||||
|
||||
### Polling Handler (`apps/sim/lib/webhooks/polling/{service}.ts`)
|
||||
|
||||
```typescript
|
||||
import { pollingIdempotency } from '@/lib/core/idempotency/service'
|
||||
import type { PollingProviderHandler, PollWebhookContext } from '@/lib/webhooks/polling/types'
|
||||
import { markWebhookFailed, markWebhookSuccess, resolveOAuthCredential, updateWebhookProviderConfig } from '@/lib/webhooks/polling/utils'
|
||||
import { processPolledWebhookEvent } from '@/lib/webhooks/processor'
|
||||
|
||||
export const {service}PollingHandler: PollingProviderHandler = {
|
||||
provider: '{service}',
|
||||
label: '{Service}',
|
||||
|
||||
async pollWebhook(ctx: PollWebhookContext): Promise<'success' | 'failure'> {
|
||||
const { webhookData, workflowData, requestId, logger } = ctx
|
||||
const webhookId = webhookData.id
|
||||
|
||||
try {
|
||||
// For OAuth services:
|
||||
const accessToken = await resolveOAuthCredential(webhookData, '{service}', requestId, logger)
|
||||
const config = webhookData.providerConfig as unknown as {Service}WebhookConfig
|
||||
|
||||
// First poll: seed state, emit nothing
|
||||
if (!config.lastCheckedTimestamp) {
|
||||
await updateWebhookProviderConfig(webhookId, { lastCheckedTimestamp: new Date().toISOString() }, logger)
|
||||
await markWebhookSuccess(webhookId, logger)
|
||||
return 'success'
|
||||
}
|
||||
|
||||
// Fetch changes since last poll, process with idempotency
|
||||
// ...
|
||||
|
||||
await markWebhookSuccess(webhookId, logger)
|
||||
return 'success'
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error processing {service} webhook ${webhookId}:`, error)
|
||||
await markWebhookFailed(webhookId, logger)
|
||||
return 'failure'
|
||||
}
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
**Key patterns:**
|
||||
- First poll seeds state and emits nothing (avoids flooding with existing data)
|
||||
- Use `pollingIdempotency.executeWithIdempotency(provider, key, callback)` for dedup
|
||||
- Use `processPolledWebhookEvent(webhookData, workflowData, payload, requestId)` to fire the workflow
|
||||
- Use `updateWebhookProviderConfig(webhookId, partialConfig, logger)` for read-merge-write on state
|
||||
- Use the latest server-side timestamp from API responses (not wall clock) to avoid clock skew
|
||||
|
||||
### Trigger Config (`apps/sim/triggers/{service}/poller.ts`)
|
||||
|
||||
```typescript
|
||||
import { {Service}Icon } from '@/components/icons'
|
||||
import type { TriggerConfig } from '@/triggers/types'
|
||||
|
||||
export const {service}PollingTrigger: TriggerConfig = {
|
||||
id: '{service}_poller',
|
||||
name: '{Service} Trigger',
|
||||
provider: '{service}',
|
||||
description: 'Triggers when ...',
|
||||
version: '1.0.0',
|
||||
icon: {Service}Icon,
|
||||
polling: true, // REQUIRED — routes to polling infrastructure
|
||||
|
||||
subBlocks: [
|
||||
{ id: 'triggerCredentials', type: 'oauth-input', title: 'Credentials', serviceId: '{service}', requiredScopes: [], required: true, mode: 'trigger', supportsCredentialSets: true },
|
||||
// ... service-specific config fields (dropdowns, inputs, switches) ...
|
||||
{ id: 'triggerInstructions', type: 'text', title: 'Setup Instructions', hideFromPreview: true, mode: 'trigger', defaultValue: '...' },
|
||||
],
|
||||
|
||||
outputs: {
|
||||
// Must match the payload shape from processPolledWebhookEvent
|
||||
},
|
||||
}
|
||||
```
|
||||
|
||||
### Registration (3 places)
|
||||
|
||||
1. **`apps/sim/triggers/constants.ts`** — add provider to `POLLING_PROVIDERS` Set
|
||||
2. **`apps/sim/lib/webhooks/polling/registry.ts`** — import handler, add to `POLLING_HANDLERS`
|
||||
3. **`apps/sim/triggers/registry.ts`** — import trigger config, add to `TRIGGER_REGISTRY`
|
||||
|
||||
### Helm Cron Job
|
||||
|
||||
Add to `helm/sim/values.yaml` under the existing polling cron jobs:
|
||||
|
||||
```yaml
|
||||
{service}WebhookPoll:
|
||||
schedule: "*/1 * * * *"
|
||||
concurrencyPolicy: Forbid
|
||||
url: "http://sim:3000/api/webhooks/poll/{service}"
|
||||
```
|
||||
|
||||
### Reference Implementations
|
||||
|
||||
- Simple: `apps/sim/lib/webhooks/polling/rss.ts` + `apps/sim/triggers/rss/poller.ts`
|
||||
- Complex (OAuth, attachments): `apps/sim/lib/webhooks/polling/gmail.ts` + `apps/sim/triggers/gmail/poller.ts`
|
||||
- Cursor-based (changes API): `apps/sim/lib/webhooks/polling/google-drive.ts`
|
||||
- Timestamp-based: `apps/sim/lib/webhooks/polling/google-calendar.ts`
|
||||
|
||||
## Checklist
|
||||
|
||||
### Trigger Definition
|
||||
@@ -347,7 +476,17 @@ export function buildOutputs(): Record<string, TriggerOutput> {
|
||||
- [ ] NO changes to `route.ts`, `provider-subscriptions.ts`, or `deploy.ts`
|
||||
- [ ] API key field uses `password: true`
|
||||
|
||||
### Polling Trigger (if applicable)
|
||||
- [ ] Handler implements `PollingProviderHandler` at `lib/webhooks/polling/{service}.ts`
|
||||
- [ ] Trigger config has `polling: true` and defines subBlocks manually (no `buildTriggerSubBlocks`)
|
||||
- [ ] Provider string matches across: trigger config, handler, `POLLING_PROVIDERS`, polling registry
|
||||
- [ ] First poll seeds state and emits nothing
|
||||
- [ ] Added provider to `POLLING_PROVIDERS` in `triggers/constants.ts`
|
||||
- [ ] Added handler to `POLLING_HANDLERS` in `lib/webhooks/polling/registry.ts`
|
||||
- [ ] Added cron job to `helm/sim/values.yaml`
|
||||
- [ ] Payload shape matches trigger `outputs` schema
|
||||
|
||||
### Testing
|
||||
- [ ] `bun run type-check` passes
|
||||
- [ ] Manually verify `formatInput` output keys match trigger `outputs` keys
|
||||
- [ ] Manually verify output keys match trigger `outputs` keys
|
||||
- [ ] Trigger UI shows correctly in the block
|
||||
|
||||
20
.cursor/commands/cleanup.md
Normal file
20
.cursor/commands/cleanup.md
Normal file
@@ -0,0 +1,20 @@
|
||||
# Cleanup
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Steps
|
||||
|
||||
Run each of these skills in order on the specified scope, passing through the scope and fix arguments. After each skill completes, move to the next. Do not skip any.
|
||||
|
||||
1. `/you-might-not-need-an-effect $ARGUMENTS`
|
||||
2. `/you-might-not-need-a-memo $ARGUMENTS`
|
||||
3. `/you-might-not-need-a-callback $ARGUMENTS`
|
||||
4. `/you-might-not-need-state $ARGUMENTS`
|
||||
5. `/react-query-best-practices $ARGUMENTS`
|
||||
6. `/emcn-design-review $ARGUMENTS`
|
||||
|
||||
After all skills have run, output a summary of what was found and fixed (or proposed) across all six passes.
|
||||
74
.cursor/commands/emcn-design-review.md
Normal file
74
.cursor/commands/emcn-design-review.md
Normal file
@@ -0,0 +1,74 @@
|
||||
# EMCN Design Review
|
||||
|
||||
Arguments:
|
||||
- scope: what to review (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses **emcn**, a custom component library built on Radix UI primitives with CVA variants and CSS variable design tokens. All UI must use emcn components and tokens.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the emcn barrel export at `apps/sim/components/emcn/components/index.ts` to know what's available
|
||||
2. Read `apps/sim/app/_styles/globals.css` for CSS variable tokens
|
||||
3. Analyze the specified scope against every rule below
|
||||
4. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
|
||||
---
|
||||
|
||||
## Imports
|
||||
|
||||
- Import from `@/components/emcn` barrel, never subpaths
|
||||
- Icons from `@/components/emcn/icons` or `lucide-react`
|
||||
- Use `cn` from `@/lib/core/utils/cn` for conditional classes
|
||||
|
||||
## Design Tokens
|
||||
|
||||
Use the CSS variable pattern (`text-[var(--text-primary)]`), never Tailwind semantic classes (`text-muted-foreground`) or hardcoded colors (`text-gray-500`, `#333`).
|
||||
|
||||
**Text**: `--text-primary`, `--text-secondary`, `--text-tertiary`, `--text-muted`, `--text-icon`, `--text-inverse`, `--text-error`
|
||||
**Surfaces**: `--bg`, `--surface-2` through `--surface-7`, `--surface-hover`, `--surface-active`
|
||||
**Borders**: `--border`, `--border-1`, `--border-muted`
|
||||
**Z-Index**: `--z-dropdown` (100), `--z-modal` (200), `--z-popover` (300), `--z-tooltip` (400), `--z-toast` (500)
|
||||
**Shadows**: `shadow-subtle`, `shadow-medium`, `shadow-overlay`, `shadow-card`
|
||||
|
||||
## Buttons
|
||||
|
||||
| Action | Variant |
|
||||
|--------|---------|
|
||||
| Toolbar, icon-only | `ghost` (most common, 28%) |
|
||||
| Create, save, submit | `primary` (24%) |
|
||||
| Cancel, close | `default` |
|
||||
| Delete, remove | `destructive` |
|
||||
| Selected state | `active` |
|
||||
| Toggle | `outline` |
|
||||
|
||||
## Delete/Remove Confirmations
|
||||
|
||||
Modal `size="sm"`, title "Delete/Remove {ItemType}", `variant="destructive"` action button, `variant="default"` cancel. Cancel on the left, action on the right (this ordering is used consistently across the codebase). Use `text-[var(--text-error)]` for irreversible warnings.
|
||||
|
||||
## Toast
|
||||
|
||||
`toast.success()`, `toast.error()`, `toast()` from `@/components/emcn`. Never custom notification UI.
|
||||
|
||||
## Badges
|
||||
|
||||
`red`=error/failed, `gray-secondary`=metadata/roles, `type`=type annotations, `green`=success/active, `gray`=neutral, `amber`=processing, `orange`=paused, `blue`=info. Use `dot` prop for status indicators.
|
||||
|
||||
## Icons
|
||||
|
||||
Default: `h-[14px] w-[14px]` (400+ uses). Color: `text-[var(--text-icon)]`. Scale: 14px > 16px > 12px > 20px.
|
||||
|
||||
## Anti-patterns to flag
|
||||
|
||||
- Raw `<button>`/`<input>` instead of emcn components
|
||||
- Hardcoded colors (`text-gray-*`, `#hex`, `rgb()`)
|
||||
- Tailwind semantic classes (`text-muted-foreground`) instead of CSS variables
|
||||
- Template literal className instead of `cn()`
|
||||
- Inline styles for colors/static values (dynamic values OK)
|
||||
- Importing from emcn subpaths instead of barrel
|
||||
- Arbitrary z-index instead of tokens
|
||||
- Wrong button variant for action type
|
||||
49
.cursor/commands/react-query-best-practices.md
Normal file
49
.cursor/commands/react-query-best-practices.md
Normal file
@@ -0,0 +1,49 @@
|
||||
# React Query Best Practices
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/hooks/queries/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query (TanStack Query) as the single source of truth for all server state. All query hooks live in `hooks/queries/`. Zustand is used only for client-only UI state. Server data must never be duplicated into useState or Zustand outside of mutation callbacks that coordinate cross-store state.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://tkdodo.eu/blog/practical-react-query — foundational defaults, custom hooks, avoiding local state copies
|
||||
2. https://tkdodo.eu/blog/effective-react-query-keys — key factory pattern, hierarchical keys, fuzzy invalidation
|
||||
3. https://tkdodo.eu/blog/react-query-as-a-state-manager — React Query IS your server state manager
|
||||
|
||||
## Rules to enforce
|
||||
|
||||
### Query key factories
|
||||
- Every file in `hooks/queries/` must have a hierarchical key factory with an `all` root key
|
||||
- Keys must include intermediate plural keys (`lists`, `details`) for prefix invalidation
|
||||
- Key factories are colocated with their query hooks, not in a global keys file
|
||||
|
||||
### Query hooks
|
||||
- Every `queryFn` must forward `signal` for request cancellation
|
||||
- Every query must have an explicit `staleTime` (default 0 is almost never correct)
|
||||
- `keepPreviousData` / `placeholderData` only on variable-key queries (where params change), never on static keys
|
||||
- Use `enabled` to prevent queries from running without required params
|
||||
|
||||
### Mutations
|
||||
- Use `onSettled` (not `onSuccess`) for cache reconciliation — it fires on both success and error
|
||||
- For optimistic updates: save previous data in `onMutate`, roll back in `onError`
|
||||
- Use targeted invalidation (`entityKeys.lists()`) not broad (`entityKeys.all`) when possible
|
||||
- Don't include mutation objects in `useCallback` deps — `.mutate()` is stable
|
||||
|
||||
### Server state ownership
|
||||
- Never copy query data into useState. Use query data directly in components.
|
||||
- Never copy query data into Zustand stores (exception: mutation callbacks that coordinate cross-store state like temp ID replacement)
|
||||
- The query cache is not a local state manager — `setQueryData` is for optimistic updates only
|
||||
- Forms are the one deliberate exception: copy server data into local form state with `staleTime: Infinity`
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope against the rules listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
30
.cursor/commands/you-might-not-need-a-callback.md
Normal file
30
.cursor/commands/you-might-not-need-a-callback.md
Normal file
@@ -0,0 +1,30 @@
|
||||
# You Might Not Need a Callback
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://react.dev/reference/react/useCallback — official docs on when useCallback is actually needed
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **useCallback on functions not passed as props or deps**: No benefit if only called within the same component.
|
||||
2. **useCallback with deps that change every render**: Memoization is wasted.
|
||||
3. **useCallback on handlers passed to native elements**: `<button onClick={fn}>` doesn't benefit from stable references.
|
||||
4. **useCallback wrapping functions that return new objects/arrays**: Memoization at the wrong level.
|
||||
5. **useCallback with empty deps when deps are needed**: Stale closures.
|
||||
6. **Pairing useCallback + React.memo unnecessarily**: Only optimize when you've measured a problem.
|
||||
7. **useCallback in hooks that don't need stable references**: Not every hook return needs memoization.
|
||||
|
||||
Note: This codebase uses a ref pattern for stable callbacks (`useRef` + empty deps). That pattern is correct — don't flag it.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
28
.cursor/commands/you-might-not-need-a-memo.md
Normal file
28
.cursor/commands/you-might-not-need-a-memo.md
Normal file
@@ -0,0 +1,28 @@
|
||||
# You Might Not Need a Memo
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## References
|
||||
|
||||
Read before analyzing:
|
||||
1. https://overreacted.io/before-you-memo/ — two techniques to avoid memo entirely
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **State can be moved down instead of memoizing**: Move state into a smaller child so the slow component stops re-rendering without memo.
|
||||
2. **Children can be lifted up**: Extract stateful part, pass expensive subtree as `children` — children as props don't re-render when parent state changes.
|
||||
3. **useMemo on cheap computations**: Small array filters, string concat, arithmetic don't need memoization.
|
||||
4. **useMemo with constantly-changing deps**: Deps change every render = useMemo does nothing.
|
||||
5. **useMemo to stabilize props for non-memoized children**: If the child isn't wrapped in React.memo, stable references don't matter.
|
||||
6. **React.memo on components that always receive new props**: Fix the parent instead.
|
||||
7. **useMemo for derived state**: Just compute inline during render.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the reference above
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
33
.cursor/commands/you-might-not-need-state.md
Normal file
33
.cursor/commands/you-might-not-need-state.md
Normal file
@@ -0,0 +1,33 @@
|
||||
# You Might Not Need State
|
||||
|
||||
Arguments:
|
||||
- scope: what to analyze (default: your current changes). Examples: "diff to main", "PR #123", "src/components/", "whole codebase"
|
||||
- fix: whether to apply fixes (default: true). Set to false to only propose changes.
|
||||
|
||||
User arguments: $ARGUMENTS
|
||||
|
||||
## Context
|
||||
|
||||
This codebase uses React Query for all server state and Zustand for client-only global state. useState should only be used for ephemeral UI concerns (open/closed, hover, local form input). Server data should never be copied into useState or Zustand — React Query is the single source of truth.
|
||||
|
||||
## References
|
||||
|
||||
Read these before analyzing:
|
||||
1. https://react.dev/learn/choosing-the-state-structure — 5 principles for structuring state
|
||||
2. https://tkdodo.eu/blog/dont-over-use-state — never store derived/computed values in state
|
||||
3. https://tkdodo.eu/blog/putting-props-to-use-state — never mirror props into state via useEffect
|
||||
|
||||
## Anti-patterns to detect
|
||||
|
||||
1. **Derived state stored in useState**: If a value can be computed from props, other state, or query data, compute it inline during render instead of storing it in state.
|
||||
2. **Server state copied into useState**: Never `useState` + `useEffect` to sync React Query data into local state. Use query data directly. The only exception is forms where users edit server data.
|
||||
3. **Props mirrored into state**: Never `useState(prop)` + `useEffect(() => setState(prop))`. Use the prop directly, or use a key to reset component state.
|
||||
4. **Chained useEffect state updates**: Never chain Effects that set state to trigger other Effects. Calculate all derived values in the event handler or inline during render.
|
||||
5. **Storing objects when an ID suffices**: Store `selectedId` not a copy of the selected object. Derive the object: `items.find(i => i.id === selectedId)`.
|
||||
6. **State that duplicates Zustand or React Query**: If the data already lives in a store or query cache, don't create a parallel useState.
|
||||
|
||||
## Steps
|
||||
|
||||
1. Read the references above to understand the guidelines
|
||||
2. Analyze the specified scope for the anti-patterns listed above
|
||||
3. If fix=true, apply the fixes. If fix=false, propose the fixes without applying.
|
||||
76
.cursor/rules/constitution.mdc
Normal file
76
.cursor/rules/constitution.mdc
Normal file
@@ -0,0 +1,76 @@
|
||||
---
|
||||
description: Sim product language, positioning, and tone guidelines
|
||||
globs: ["apps/sim/app/(landing)/**", "apps/sim/app/(home)/**", "apps/docs/**", "apps/sim/app/manifest.ts", "apps/sim/app/sitemap.ts", "apps/sim/app/robots.ts", "apps/sim/app/llms.txt/**", "apps/sim/app/llms-full.txt/**", "apps/sim/app/(landing)/**/structured-data*", "apps/docs/**/structured-data*", "**/metadata*", "**/seo*"]
|
||||
---
|
||||
|
||||
# Sim — Language & Positioning
|
||||
|
||||
When editing user-facing copy (landing pages, docs, metadata, marketing), follow these rules.
|
||||
|
||||
## Identity
|
||||
|
||||
Sim is the **AI workspace** where teams build and run AI agents. Not a workflow tool, not an agent framework, not an automation platform.
|
||||
|
||||
**Short definition:** Sim is the open-source AI workspace where teams build, deploy, and manage AI agents.
|
||||
|
||||
**Full definition:** Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code.
|
||||
|
||||
## Audience
|
||||
|
||||
**Primary:** Teams building AI agents for their organization — IT, operations, and technical teams who need governance, security, lifecycle management, and collaboration.
|
||||
|
||||
**Secondary:** Individual builders and developers who care about speed, flexibility, and open source.
|
||||
|
||||
## Required Language
|
||||
|
||||
| Concept | Use | Never use |
|
||||
|---------|-----|-----------|
|
||||
| The product | "AI workspace" | "workflow tool", "automation platform", "agent framework" |
|
||||
| Building | "build agents", "create agents" | "create workflows" (unless describing the workflow module specifically) |
|
||||
| Visual builder | "workflow builder" or "visual builder" | "canvas", "graph editor" |
|
||||
| Mothership | "Mothership" (capitalized) | "chat", "AI assistant", "copilot" |
|
||||
| Deployment | "deploy", "ship" | "publish", "activate" |
|
||||
| Audience | "teams", "builders" | "users", "customers" (in marketing copy) |
|
||||
| What agents do | "automate real work" | "automate tasks", "automate workflows" |
|
||||
| Our advantage | "open-source AI workspace" | "open-source platform" |
|
||||
|
||||
## Tone
|
||||
|
||||
- **Direct.** Short sentences. Active voice. Lead with what it does.
|
||||
- **Concrete.** Name specific things — "Slack bots, compliance agents, data pipelines" — not abstractions.
|
||||
- **Confident, not loud.** No exclamation marks or superlatives.
|
||||
- **Simple.** If a 16-year-old can't understand the sentence, rewrite it.
|
||||
|
||||
## Claim Hierarchy
|
||||
|
||||
When describing Sim, always lead with the most differentiated claim:
|
||||
|
||||
1. **What it is:** "The AI workspace for teams"
|
||||
2. **What you do:** "Build, deploy, and manage AI agents"
|
||||
3. **How:** "Visually, conversationally, or with code"
|
||||
4. **Scale:** "1,000+ integrations, every major LLM"
|
||||
5. **Trust:** "Open source. SOC2. Trusted by 100,000+ builders."
|
||||
|
||||
## Module Descriptions
|
||||
|
||||
| Module | One-liner |
|
||||
|--------|-----------|
|
||||
| **Mothership** | Your AI command center. Build and manage everything in natural language. |
|
||||
| **Workflows** | The visual builder. Connect blocks, models, and integrations into agent logic. |
|
||||
| **Knowledge Base** | Your agents' memory. Upload docs, sync sources, build vector databases. |
|
||||
| **Tables** | A database, built in. Store, query, and wire structured data into agent runs. |
|
||||
| **Files** | Upload, create, and share. One store for your team and every agent. |
|
||||
| **Logs** | Full visibility, every run. Trace execution block by block. |
|
||||
|
||||
## What We Never Say
|
||||
|
||||
- Never call Sim "just a workflow tool"
|
||||
- Never compare only on integration count — we win on AI-native capabilities
|
||||
- Never use "no-code" as the primary descriptor — say "visually, conversationally, or with code"
|
||||
- Never promise unshipped features
|
||||
- Never use jargon ("RAG", "vector database", "MCP") without plain-English explanation on public pages
|
||||
- Avoid "agentic workforce" as a primary term — use "AI agents"
|
||||
|
||||
## Vision
|
||||
|
||||
Sim becomes the default environment where teams build AI agents — not a tool you visit for one task, but a workspace you live in. Workflows are one module; Mothership is another. The workspace is the constant; the interface adapts.
|
||||
28
.github/CODEOWNERS
vendored
Normal file
28
.github/CODEOWNERS
vendored
Normal file
@@ -0,0 +1,28 @@
|
||||
# Copilot/Mothership chat streaming entrypoints and replay surfaces.
|
||||
/apps/sim/app/api/copilot/chat/ @simstudioai/mothership
|
||||
/apps/sim/app/api/copilot/confirm/ @simstudioai/mothership
|
||||
/apps/sim/app/api/copilot/chats/ @simstudioai/mothership
|
||||
/apps/sim/app/api/mothership/chat/ @simstudioai/mothership
|
||||
/apps/sim/app/api/mothership/chats/ @simstudioai/mothership
|
||||
/apps/sim/app/api/mothership/execute/ @simstudioai/mothership
|
||||
/apps/sim/app/api/v1/copilot/chat/ @simstudioai/mothership
|
||||
|
||||
# Server-side stream orchestration, persistence, and protocol.
|
||||
/apps/sim/lib/copilot/chat/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/async-runs/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/request/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/generated/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/constants.ts @simstudioai/mothership
|
||||
/apps/sim/lib/core/utils/sse.ts @simstudioai/mothership
|
||||
|
||||
# Stream-time tool execution, confirmations, resource persistence, and handlers.
|
||||
/apps/sim/lib/copilot/tool-executor/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/tools/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/persistence/ @simstudioai/mothership
|
||||
/apps/sim/lib/copilot/resources/ @simstudioai/mothership
|
||||
|
||||
# Client-side stream consumption, hydration, and reconnect.
|
||||
/apps/sim/app/workspace/*/home/hooks/index.ts @simstudioai/mothership
|
||||
/apps/sim/app/workspace/*/home/hooks/use-chat.ts @simstudioai/mothership
|
||||
/apps/sim/app/workspace/*/home/hooks/use-file-preview-sessions.ts @simstudioai/mothership
|
||||
/apps/sim/hooks/queries/tasks.ts @simstudioai/mothership
|
||||
104
.github/workflows/ci.yml
vendored
104
.github/workflows/ci.yml
vendored
@@ -16,6 +16,7 @@ permissions:
|
||||
jobs:
|
||||
test-build:
|
||||
name: Test and Build
|
||||
if: github.ref != 'refs/heads/dev' || github.event_name == 'pull_request'
|
||||
uses: ./.github/workflows/test-build.yml
|
||||
secrets: inherit
|
||||
|
||||
@@ -45,11 +46,72 @@ jobs:
|
||||
echo "ℹ️ Not a release commit"
|
||||
fi
|
||||
|
||||
# Build AMD64 images and push to ECR immediately (+ GHCR for main)
|
||||
# Dev: build all 3 images for ECR only (no GHCR, no ARM64)
|
||||
build-dev:
|
||||
name: Build Dev ECR
|
||||
needs: [detect-version]
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/dev'
|
||||
runs-on: blacksmith-8vcpu-ubuntu-2404
|
||||
permissions:
|
||||
contents: read
|
||||
id-token: write
|
||||
strategy:
|
||||
fail-fast: false
|
||||
matrix:
|
||||
include:
|
||||
- dockerfile: ./docker/app.Dockerfile
|
||||
ecr_repo_secret: ECR_APP
|
||||
- dockerfile: ./docker/db.Dockerfile
|
||||
ecr_repo_secret: ECR_MIGRATIONS
|
||||
- dockerfile: ./docker/realtime.Dockerfile
|
||||
ecr_repo_secret: ECR_REALTIME
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: ${{ secrets.DEV_AWS_ROLE_TO_ASSUME }}
|
||||
aws-region: ${{ secrets.DEV_AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
uses: aws-actions/amazon-ecr-login@v2
|
||||
|
||||
- name: Login to Docker Hub
|
||||
uses: docker/login-action@v3
|
||||
with:
|
||||
username: ${{ secrets.DOCKERHUB_USERNAME }}
|
||||
password: ${{ secrets.DOCKERHUB_TOKEN }}
|
||||
|
||||
- name: Set up Docker Buildx
|
||||
uses: useblacksmith/setup-docker-builder@v1
|
||||
|
||||
- name: Resolve ECR repo name
|
||||
id: ecr-repo
|
||||
run: echo "name=$ECR_REPO" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
ECR_REPO: ${{ matrix.ecr_repo_secret == 'ECR_APP' && secrets.ECR_APP || matrix.ecr_repo_secret == 'ECR_MIGRATIONS' && secrets.ECR_MIGRATIONS || matrix.ecr_repo_secret == 'ECR_REALTIME' && secrets.ECR_REALTIME || '' }}
|
||||
|
||||
- name: Build and push
|
||||
uses: useblacksmith/build-push-action@v2
|
||||
with:
|
||||
context: .
|
||||
file: ${{ matrix.dockerfile }}
|
||||
platforms: linux/amd64
|
||||
push: true
|
||||
tags: ${{ steps.login-ecr.outputs.registry }}/${{ steps.ecr-repo.outputs.name }}:dev
|
||||
provenance: false
|
||||
sbom: false
|
||||
|
||||
# Main/staging: build AMD64 images and push to ECR + GHCR
|
||||
build-amd64:
|
||||
name: Build AMD64
|
||||
needs: [test-build, detect-version]
|
||||
if: github.event_name == 'push' && (github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging' || github.ref == 'refs/heads/dev')
|
||||
if: >-
|
||||
github.event_name == 'push' &&
|
||||
(github.ref == 'refs/heads/main' || github.ref == 'refs/heads/staging')
|
||||
runs-on: blacksmith-8vcpu-ubuntu-2404
|
||||
permissions:
|
||||
contents: read
|
||||
@@ -70,13 +132,13 @@ jobs:
|
||||
ecr_repo_secret: ECR_REALTIME
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
with:
|
||||
role-to-assume: ${{ github.ref == 'refs/heads/main' && secrets.AWS_ROLE_TO_ASSUME || github.ref == 'refs/heads/dev' && secrets.DEV_AWS_ROLE_TO_ASSUME || secrets.STAGING_AWS_ROLE_TO_ASSUME }}
|
||||
aws-region: ${{ github.ref == 'refs/heads/main' && secrets.AWS_REGION || github.ref == 'refs/heads/dev' && secrets.DEV_AWS_REGION || secrets.STAGING_AWS_REGION }}
|
||||
role-to-assume: ${{ github.ref == 'refs/heads/main' && secrets.AWS_ROLE_TO_ASSUME || secrets.STAGING_AWS_ROLE_TO_ASSUME }}
|
||||
aws-region: ${{ github.ref == 'refs/heads/main' && secrets.AWS_REGION || secrets.STAGING_AWS_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR
|
||||
id: login-ecr
|
||||
@@ -99,33 +161,33 @@ jobs:
|
||||
- name: Set up Docker Buildx
|
||||
uses: useblacksmith/setup-docker-builder@v1
|
||||
|
||||
- name: Resolve ECR repo name
|
||||
id: ecr-repo
|
||||
run: echo "name=$ECR_REPO" >> $GITHUB_OUTPUT
|
||||
env:
|
||||
ECR_REPO: ${{ matrix.ecr_repo_secret == 'ECR_APP' && secrets.ECR_APP || matrix.ecr_repo_secret == 'ECR_MIGRATIONS' && secrets.ECR_MIGRATIONS || matrix.ecr_repo_secret == 'ECR_REALTIME' && secrets.ECR_REALTIME || '' }}
|
||||
|
||||
- name: Generate tags
|
||||
id: meta
|
||||
run: |
|
||||
ECR_REGISTRY="${{ steps.login-ecr.outputs.registry }}"
|
||||
ECR_REPO="${{ secrets[matrix.ecr_repo_secret] }}"
|
||||
ECR_REPO="${{ steps.ecr-repo.outputs.name }}"
|
||||
GHCR_IMAGE="${{ matrix.ghcr_image }}"
|
||||
|
||||
# ECR tags (always build for ECR)
|
||||
if [ "${{ github.ref }}" = "refs/heads/main" ]; then
|
||||
ECR_TAG="latest"
|
||||
elif [ "${{ github.ref }}" = "refs/heads/dev" ]; then
|
||||
ECR_TAG="dev"
|
||||
else
|
||||
ECR_TAG="staging"
|
||||
fi
|
||||
ECR_IMAGE="${ECR_REGISTRY}/${ECR_REPO}:${ECR_TAG}"
|
||||
|
||||
# Build tags list
|
||||
TAGS="${ECR_IMAGE}"
|
||||
|
||||
# Add GHCR tags only for main branch
|
||||
if [ "${{ github.ref }}" = "refs/heads/main" ]; then
|
||||
GHCR_AMD64="${GHCR_IMAGE}:latest-amd64"
|
||||
GHCR_SHA="${GHCR_IMAGE}:${{ github.sha }}-amd64"
|
||||
TAGS="${TAGS},$GHCR_AMD64,$GHCR_SHA"
|
||||
|
||||
# Add version tag if this is a release commit
|
||||
if [ "${{ needs.detect-version.outputs.is_release }}" = "true" ]; then
|
||||
VERSION="${{ needs.detect-version.outputs.version }}"
|
||||
GHCR_VERSION="${GHCR_IMAGE}:${VERSION}-amd64"
|
||||
@@ -150,7 +212,7 @@ jobs:
|
||||
# Build ARM64 images for GHCR (main branch only, runs in parallel)
|
||||
build-ghcr-arm64:
|
||||
name: Build ARM64 (GHCR Only)
|
||||
needs: [test-build, detect-version]
|
||||
needs: [detect-version]
|
||||
runs-on: blacksmith-8vcpu-ubuntu-2404-arm
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
|
||||
permissions:
|
||||
@@ -169,7 +231,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
@@ -256,6 +318,14 @@ jobs:
|
||||
docker manifest push "${IMAGE_BASE}:${VERSION}"
|
||||
fi
|
||||
|
||||
# Run database migrations for dev
|
||||
migrate-dev:
|
||||
name: Migrate Dev DB
|
||||
needs: [build-dev]
|
||||
if: github.event_name == 'push' && github.ref == 'refs/heads/dev'
|
||||
uses: ./.github/workflows/migrations.yml
|
||||
secrets: inherit
|
||||
|
||||
# Check if docs changed
|
||||
check-docs-changes:
|
||||
name: Check Docs Changes
|
||||
@@ -264,10 +334,10 @@ jobs:
|
||||
outputs:
|
||||
docs_changed: ${{ steps.filter.outputs.docs }}
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
- uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 2 # Need at least 2 commits to detect changes
|
||||
- uses: dorny/paths-filter@v3
|
||||
- uses: dorny/paths-filter@v4
|
||||
id: filter
|
||||
with:
|
||||
filters: |
|
||||
@@ -294,7 +364,7 @@ jobs:
|
||||
contents: write
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
fetch-depth: 0
|
||||
|
||||
|
||||
2
.github/workflows/docs-embeddings.yml
vendored
2
.github/workflows/docs-embeddings.yml
vendored
@@ -15,7 +15,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
|
||||
4
.github/workflows/i18n.yml
vendored
4
.github/workflows/i18n.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: staging
|
||||
token: ${{ secrets.GH_PAT }}
|
||||
@@ -115,7 +115,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
with:
|
||||
ref: staging
|
||||
|
||||
|
||||
4
.github/workflows/images.yml
vendored
4
.github/workflows/images.yml
vendored
@@ -31,7 +31,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Configure AWS credentials
|
||||
uses: aws-actions/configure-aws-credentials@v4
|
||||
@@ -117,7 +117,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Login to GHCR
|
||||
uses: docker/login-action@v3
|
||||
|
||||
4
.github/workflows/migrations.yml
vendored
4
.github/workflows/migrations.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
@@ -38,5 +38,5 @@ jobs:
|
||||
- name: Apply migrations
|
||||
working-directory: ./packages/db
|
||||
env:
|
||||
DATABASE_URL: ${{ github.ref == 'refs/heads/main' && secrets.DATABASE_URL || secrets.STAGING_DATABASE_URL }}
|
||||
DATABASE_URL: ${{ github.ref == 'refs/heads/main' && secrets.DATABASE_URL || github.ref == 'refs/heads/dev' && secrets.DEV_DATABASE_URL || secrets.STAGING_DATABASE_URL }}
|
||||
run: bunx drizzle-kit migrate --config=./drizzle.config.ts
|
||||
2
.github/workflows/publish-cli.yml
vendored
2
.github/workflows/publish-cli.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
|
||||
2
.github/workflows/publish-python-sdk.yml
vendored
2
.github/workflows/publish-python-sdk.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Python
|
||||
uses: actions/setup-python@v5
|
||||
|
||||
2
.github/workflows/publish-ts-sdk.yml
vendored
2
.github/workflows/publish-ts-sdk.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
runs-on: blacksmith-4vcpu-ubuntu-2404
|
||||
steps:
|
||||
- name: Checkout repository
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
|
||||
6
.github/workflows/test-build.yml
vendored
6
.github/workflows/test-build.yml
vendored
@@ -14,7 +14,7 @@ jobs:
|
||||
|
||||
steps:
|
||||
- name: Checkout code
|
||||
uses: actions/checkout@v4
|
||||
uses: actions/checkout@v6
|
||||
|
||||
- name: Setup Bun
|
||||
uses: oven-sh/setup-bun@v2
|
||||
@@ -105,7 +105,7 @@ jobs:
|
||||
|
||||
- name: Run tests with coverage
|
||||
env:
|
||||
NODE_OPTIONS: '--no-warnings'
|
||||
NODE_OPTIONS: '--no-warnings --max-old-space-size=8192'
|
||||
NEXT_PUBLIC_APP_URL: 'https://www.sim.ai'
|
||||
DATABASE_URL: 'postgresql://postgres:postgres@localhost:5432/simstudio'
|
||||
ENCRYPTION_KEY: '7cf672e460e430c1fba707575c2b0e2ad5a99dddf9b7b7e3b5646e630861db1c' # dummy key for CI only
|
||||
@@ -127,7 +127,7 @@ jobs:
|
||||
|
||||
- name: Build application
|
||||
env:
|
||||
NODE_OPTIONS: '--no-warnings'
|
||||
NODE_OPTIONS: '--no-warnings --max-old-space-size=8192'
|
||||
NEXT_PUBLIC_APP_URL: 'https://www.sim.ai'
|
||||
DATABASE_URL: 'postgresql://postgres:postgres@localhost:5432/simstudio'
|
||||
STRIPE_SECRET_KEY: 'dummy_key_for_ci_only'
|
||||
|
||||
10
README.md
10
README.md
@@ -74,10 +74,6 @@ docker compose -f docker-compose.prod.yml up -d
|
||||
|
||||
Open [http://localhost:3000](http://localhost:3000)
|
||||
|
||||
#### Background worker note
|
||||
|
||||
The Docker Compose stack starts a dedicated worker container by default. If `REDIS_URL` is not configured, the worker will start, log that it is idle, and do no queue processing. This is expected. Queue-backed API, webhook, and schedule execution requires Redis; installs without Redis continue to use the inline execution path.
|
||||
|
||||
Sim also supports local models via [Ollama](https://ollama.ai) and [vLLM](https://docs.vllm.ai/) — see the [Docker self-hosting docs](https://docs.sim.ai/self-hosting/docker) for setup details.
|
||||
|
||||
### Self-hosted: Manual Setup
|
||||
@@ -123,12 +119,10 @@ cd packages/db && bun run db:migrate
|
||||
5. Start development servers:
|
||||
|
||||
```bash
|
||||
bun run dev:full # Starts Next.js app, realtime socket server, and the BullMQ worker
|
||||
bun run dev:full # Starts Next.js app and realtime socket server
|
||||
```
|
||||
|
||||
If `REDIS_URL` is not configured, the worker will remain idle and execution continues inline.
|
||||
|
||||
Or run separately: `bun run dev` (Next.js), `cd apps/sim && bun run dev:sockets` (realtime), and `cd apps/sim && bun run worker` (BullMQ worker).
|
||||
Or run separately: `bun run dev` (Next.js) and `cd apps/sim && bun run dev:sockets` (realtime).
|
||||
|
||||
## Copilot API Keys
|
||||
|
||||
|
||||
@@ -17,9 +17,10 @@ import { ResponseSection } from '@/components/ui/response-section'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { getApiSpecContent, openapi } from '@/lib/openapi'
|
||||
import { type PageData, source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
const SUPPORTED_LANGUAGES: Set<string> = new Set(i18n.languages)
|
||||
const BASE_URL = 'https://docs.sim.ai'
|
||||
const BASE_URL = DOCS_BASE_URL
|
||||
|
||||
const OG_LOCALE_MAP: Record<string, string> = {
|
||||
en: 'en_US',
|
||||
@@ -280,12 +281,12 @@ export async function generateMetadata(props: {
|
||||
title: data.title,
|
||||
description:
|
||||
data.description ||
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents.',
|
||||
keywords: [
|
||||
'AI agents',
|
||||
'agentic workforce',
|
||||
'AI agent platform',
|
||||
'agentic workflows',
|
||||
'AI workspace',
|
||||
'AI agent builder',
|
||||
'build AI agents',
|
||||
'LLM orchestration',
|
||||
'AI automation',
|
||||
'knowledge base',
|
||||
@@ -300,7 +301,7 @@ export async function generateMetadata(props: {
|
||||
title: data.title,
|
||||
description:
|
||||
data.description ||
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents.',
|
||||
url: fullUrl,
|
||||
siteName: 'Sim Documentation',
|
||||
type: 'article',
|
||||
@@ -322,7 +323,7 @@ export async function generateMetadata(props: {
|
||||
title: data.title,
|
||||
description:
|
||||
data.description ||
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents.',
|
||||
images: [ogImageUrl],
|
||||
creator: '@simdotai',
|
||||
site: '@simdotai',
|
||||
|
||||
@@ -3,7 +3,6 @@ import { defineI18nUI } from 'fumadocs-ui/i18n'
|
||||
import { DocsLayout } from 'fumadocs-ui/layouts/docs'
|
||||
import { RootProvider } from 'fumadocs-ui/provider/next'
|
||||
import { Geist_Mono, Inter } from 'next/font/google'
|
||||
import Script from 'next/script'
|
||||
import {
|
||||
SidebarFolder,
|
||||
SidebarItem,
|
||||
@@ -13,6 +12,7 @@ import { Navbar } from '@/components/navbar/navbar'
|
||||
import { SimLogoFull } from '@/components/ui/sim-logo'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
import '../global.css'
|
||||
|
||||
const inter = Inter({
|
||||
@@ -66,15 +66,15 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
'@type': 'WebSite',
|
||||
name: 'Sim Documentation',
|
||||
description:
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
|
||||
url: 'https://docs.sim.ai',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
url: DOCS_BASE_URL,
|
||||
publisher: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
url: 'https://docs.sim.ai/static/logo.png',
|
||||
url: `${DOCS_BASE_URL}/static/logo.png`,
|
||||
},
|
||||
},
|
||||
inLanguage: lang,
|
||||
@@ -82,7 +82,7 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
'@type': 'SearchAction',
|
||||
target: {
|
||||
'@type': 'EntryPoint',
|
||||
urlTemplate: 'https://docs.sim.ai/api/search?q={search_term_string}',
|
||||
urlTemplate: `${DOCS_BASE_URL}/api/search?q={search_term_string}`,
|
||||
},
|
||||
'query-input': 'required name=search_term_string',
|
||||
},
|
||||
@@ -101,7 +101,6 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
/>
|
||||
</head>
|
||||
<body className='flex min-h-screen flex-col font-sans'>
|
||||
<Script src='https://assets.onedollarstats.com/stonks.js' strategy='lazyOnload' />
|
||||
<RootProvider i18n={provider(lang)}>
|
||||
<Navbar />
|
||||
<DocsLayout
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { ReactNode } from 'react'
|
||||
import type { Viewport } from 'next'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export default function RootLayout({ children }: { children: ReactNode }) {
|
||||
return children
|
||||
@@ -12,31 +13,29 @@ export const viewport: Viewport = {
|
||||
}
|
||||
|
||||
export const metadata = {
|
||||
metadataBase: new URL('https://docs.sim.ai'),
|
||||
metadataBase: new URL(DOCS_BASE_URL),
|
||||
title: {
|
||||
default: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
|
||||
default: 'Sim Documentation — The AI Workspace for Teams',
|
||||
template: '%s | Sim Docs',
|
||||
},
|
||||
description:
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
applicationName: 'Sim Docs',
|
||||
generator: 'Next.js',
|
||||
referrer: 'origin-when-cross-origin' as const,
|
||||
keywords: [
|
||||
'AI workspace',
|
||||
'AI agent builder',
|
||||
'AI agents',
|
||||
'agentic workforce',
|
||||
'AI agent platform',
|
||||
'build AI agents',
|
||||
'open-source AI agents',
|
||||
'agentic workflows',
|
||||
'LLM orchestration',
|
||||
'AI integrations',
|
||||
'knowledge base',
|
||||
'AI automation',
|
||||
'workflow builder',
|
||||
'AI workflow orchestration',
|
||||
'visual workflow builder',
|
||||
'enterprise AI',
|
||||
'AI agent deployment',
|
||||
'intelligent automation',
|
||||
'AI tools',
|
||||
],
|
||||
authors: [{ name: 'Sim Team', url: 'https://sim.ai' }],
|
||||
@@ -63,14 +62,14 @@ export const metadata = {
|
||||
type: 'website',
|
||||
locale: 'en_US',
|
||||
alternateLocale: ['es_ES', 'fr_FR', 'de_DE', 'ja_JP', 'zh_CN'],
|
||||
url: 'https://docs.sim.ai',
|
||||
url: DOCS_BASE_URL,
|
||||
siteName: 'Sim Documentation',
|
||||
title: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
|
||||
title: 'Sim Documentation — The AI Workspace for Teams',
|
||||
description:
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
images: [
|
||||
{
|
||||
url: 'https://docs.sim.ai/api/og?title=Sim%20Documentation',
|
||||
url: `${DOCS_BASE_URL}/api/og?title=Sim%20Documentation`,
|
||||
width: 1200,
|
||||
height: 630,
|
||||
alt: 'Sim Documentation',
|
||||
@@ -79,12 +78,12 @@ export const metadata = {
|
||||
},
|
||||
twitter: {
|
||||
card: 'summary_large_image',
|
||||
title: 'Sim Documentation — Build AI Agents & Run Your Agentic Workforce',
|
||||
title: 'Sim Documentation — The AI Workspace for Teams',
|
||||
description:
|
||||
'Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.',
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
creator: '@simdotai',
|
||||
site: '@simdotai',
|
||||
images: ['https://docs.sim.ai/api/og?title=Sim%20Documentation'],
|
||||
images: [`${DOCS_BASE_URL}/api/og?title=Sim%20Documentation`],
|
||||
},
|
||||
robots: {
|
||||
index: true,
|
||||
@@ -98,15 +97,15 @@ export const metadata = {
|
||||
},
|
||||
},
|
||||
alternates: {
|
||||
canonical: 'https://docs.sim.ai',
|
||||
canonical: DOCS_BASE_URL,
|
||||
languages: {
|
||||
'x-default': 'https://docs.sim.ai',
|
||||
en: 'https://docs.sim.ai',
|
||||
es: 'https://docs.sim.ai/es',
|
||||
fr: 'https://docs.sim.ai/fr',
|
||||
de: 'https://docs.sim.ai/de',
|
||||
ja: 'https://docs.sim.ai/ja',
|
||||
zh: 'https://docs.sim.ai/zh',
|
||||
'x-default': DOCS_BASE_URL,
|
||||
en: DOCS_BASE_URL,
|
||||
es: `${DOCS_BASE_URL}/es`,
|
||||
fr: `${DOCS_BASE_URL}/fr`,
|
||||
de: `${DOCS_BASE_URL}/de`,
|
||||
ja: `${DOCS_BASE_URL}/ja`,
|
||||
zh: `${DOCS_BASE_URL}/zh`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = false
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
try {
|
||||
const pages = source.getPages().filter((page) => {
|
||||
@@ -37,9 +38,9 @@ export async function GET() {
|
||||
|
||||
const manifest = `# Sim Documentation
|
||||
|
||||
> The open-source platform to build AI agents and run your agentic workforce.
|
||||
> The open-source AI workspace where teams build, deploy, and manage AI agents.
|
||||
|
||||
Sim is the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows. Create agents, workflows, knowledge bases, tables, and docs. Trusted by over 100,000 builders.
|
||||
Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code. Trusted by over 100,000 builders.
|
||||
|
||||
## Documentation Overview
|
||||
|
||||
|
||||
@@ -1,70 +1,18 @@
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = false
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
const robotsTxt = `# Robots.txt for Sim Documentation
|
||||
|
||||
User-agent: *
|
||||
Allow: /
|
||||
|
||||
# Search engine crawlers
|
||||
User-agent: Googlebot
|
||||
Allow: /
|
||||
|
||||
User-agent: Bingbot
|
||||
Allow: /
|
||||
|
||||
User-agent: Slurp
|
||||
Allow: /
|
||||
|
||||
User-agent: DuckDuckBot
|
||||
Allow: /
|
||||
|
||||
User-agent: Baiduspider
|
||||
Allow: /
|
||||
|
||||
User-agent: YandexBot
|
||||
Allow: /
|
||||
|
||||
# AI and LLM crawlers - explicitly allowed for documentation indexing
|
||||
User-agent: GPTBot
|
||||
Allow: /
|
||||
|
||||
User-agent: ChatGPT-User
|
||||
Allow: /
|
||||
|
||||
User-agent: CCBot
|
||||
Allow: /
|
||||
|
||||
User-agent: anthropic-ai
|
||||
Allow: /
|
||||
|
||||
User-agent: Claude-Web
|
||||
Allow: /
|
||||
|
||||
User-agent: Applebot
|
||||
Allow: /
|
||||
|
||||
User-agent: PerplexityBot
|
||||
Allow: /
|
||||
|
||||
User-agent: Diffbot
|
||||
Allow: /
|
||||
|
||||
User-agent: FacebookBot
|
||||
Allow: /
|
||||
|
||||
User-agent: cohere-ai
|
||||
Allow: /
|
||||
|
||||
# Disallow admin and internal paths (if any exist)
|
||||
Disallow: /.next/
|
||||
Disallow: /api/internal/
|
||||
Disallow: /_next/static/
|
||||
Disallow: /admin/
|
||||
|
||||
# Allow but don't prioritize these
|
||||
Allow: /
|
||||
Allow: /api/search
|
||||
Allow: /llms.txt
|
||||
Allow: /llms-full.txt
|
||||
@@ -73,23 +21,12 @@ Allow: /llms.mdx/
|
||||
# Sitemaps
|
||||
Sitemap: ${baseUrl}/sitemap.xml
|
||||
|
||||
# Crawl delay for aggressive bots (optional)
|
||||
# Crawl-delay: 1
|
||||
|
||||
# Additional resources for AI indexing
|
||||
# See https://github.com/AnswerDotAI/llms-txt for more info
|
||||
# LLM-friendly content:
|
||||
# Manifest: ${baseUrl}/llms.txt
|
||||
# Full content: ${baseUrl}/llms-full.txt
|
||||
# Individual pages: ${baseUrl}/llms.mdx/[page-path]
|
||||
|
||||
# Multi-language documentation available at:
|
||||
# ${baseUrl}/en - English
|
||||
# ${baseUrl}/es - Español
|
||||
# ${baseUrl}/fr - Français
|
||||
# ${baseUrl}/de - Deutsch
|
||||
# ${baseUrl}/ja - 日本語
|
||||
# ${baseUrl}/zh - 简体中文`
|
||||
# Individual pages: ${baseUrl}/llms.mdx/[page-path]`
|
||||
|
||||
return new Response(robotsTxt, {
|
||||
headers: {
|
||||
|
||||
42
apps/docs/app/sitemap.ts
Normal file
42
apps/docs/app/sitemap.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import type { MetadataRoute } from 'next'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export default function sitemap(): MetadataRoute.Sitemap {
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
const languages = source.getLanguages()
|
||||
|
||||
const pagesBySlug = new Map<string, Map<string, string>>()
|
||||
for (const { language, pages } of languages) {
|
||||
for (const page of pages) {
|
||||
const key = page.slugs.join('/')
|
||||
if (!pagesBySlug.has(key)) {
|
||||
pagesBySlug.set(key, new Map())
|
||||
}
|
||||
pagesBySlug.get(key)!.set(language, `${baseUrl}${page.url}`)
|
||||
}
|
||||
}
|
||||
|
||||
const entries: MetadataRoute.Sitemap = []
|
||||
for (const [, localeMap] of pagesBySlug) {
|
||||
const defaultUrl = localeMap.get(i18n.defaultLanguage)
|
||||
if (!defaultUrl) continue
|
||||
|
||||
const langAlternates: Record<string, string> = {}
|
||||
for (const [lang, url] of localeMap) {
|
||||
langAlternates[lang] = url
|
||||
}
|
||||
|
||||
langAlternates['x-default'] = defaultUrl
|
||||
|
||||
entries.push({
|
||||
url: defaultUrl,
|
||||
alternates: { languages: langAlternates },
|
||||
})
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
|
||||
const allPages = source.getPages()
|
||||
|
||||
const getPriority = (url: string): string => {
|
||||
if (url === '/introduction' || url === '/') return '1.0'
|
||||
if (url === '/getting-started') return '0.9'
|
||||
if (url.match(/^\/[^/]+$/)) return '0.8'
|
||||
if (url.includes('/sdks/') || url.includes('/tools/')) return '0.7'
|
||||
return '0.6'
|
||||
}
|
||||
|
||||
const urls = allPages
|
||||
.flatMap((page) => {
|
||||
const urlWithoutLang = page.url.replace(/^\/[a-z]{2}\//, '/')
|
||||
|
||||
return i18n.languages.map((lang) => {
|
||||
const url =
|
||||
lang === i18n.defaultLanguage
|
||||
? `${baseUrl}${urlWithoutLang}`
|
||||
: `${baseUrl}/${lang}${urlWithoutLang}`
|
||||
|
||||
return ` <url>
|
||||
<loc>${url}</loc>
|
||||
<priority>${getPriority(urlWithoutLang)}</priority>
|
||||
${i18n.languages.length > 1 ? generateAlternateLinks(baseUrl, urlWithoutLang) : ''}
|
||||
</url>`
|
||||
})
|
||||
})
|
||||
.join('\n')
|
||||
|
||||
const sitemap = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
|
||||
${urls}
|
||||
</urlset>`
|
||||
|
||||
return new Response(sitemap, {
|
||||
headers: {
|
||||
'Content-Type': 'application/xml',
|
||||
'Cache-Control': 'public, max-age=3600, s-maxage=3600',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
function generateAlternateLinks(baseUrl: string, urlWithoutLang: string): string {
|
||||
const langLinks = i18n.languages
|
||||
.map((lang) => {
|
||||
const url =
|
||||
lang === i18n.defaultLanguage
|
||||
? `${baseUrl}${urlWithoutLang}`
|
||||
: `${baseUrl}/${lang}${urlWithoutLang}`
|
||||
return ` <xhtml:link rel="alternate" hreflang="${lang}" href="${url}" />`
|
||||
})
|
||||
.join('\n')
|
||||
return `${langLinks}\n <xhtml:link rel="alternate" hreflang="x-default" href="${baseUrl}${urlWithoutLang}" />`
|
||||
}
|
||||
@@ -28,6 +28,17 @@ export function AgentMailIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function CrowdStrikeIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 768 500' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='m152.8 23.6c-.8.8.3 4.4 1.3 4.4.5 0 .9.5.9 1.2 0 1.5 7.2 15.9 8.8 17.6.6.7 1.2 1.7 1.2 2.2 0 1.3 8.6 13.7 12.8 18.4 10 11.2 28.2 28.1 35.2 32.7 1.4.9 3.9 2.9 5.5 4.3 1.7 1.5 4.8 3.9 7 5.4s4.9 3.5 5.9 4.4c1.1 1 3.8 3 6 4.5 2.3 1.6 5 3.6 6 4.5 1.1 1 3.8 3 6 4.5 2.3 1.5 4.3 3 4.6 3.3s3.7 3 7.5 6c3.9 3 7.5 5.9 8.1 6.5.6.5 4.6 4.1 8.9 8 14.6 13.1 25.8 25.3 32.6 35.5 6.6 10 9.2 14.4 15.1 25.8 3.1 6.2 7.7 14.4 10 18.3 2.4 3.9 5.4 8.9 6.7 11.2s3 4.8 3.8 5.5c.7.7 1.3 1.8 1.3 2.3s.5 1.5 1 2.2c.6.7 5.3 7.7 10.6 15.7 16.9 25.6 40.1 46 62.9 55.1 10.8 4.3 33.4 6 63 4.7 20.6-.8 44.2-.2 48.3 1.3 1.3.5 4.2.9 6.5.9 2.3.1 6 .7 8.2 1.5s4.9 1.5 6 1.5 3.3.7 4.9 1.5c1.5.8 3.5 1.5 4.3 1.5 1.6 0 7.1 2.4 19.8 8.6 18.3 9.1 33.1 19.9 48.7 35.6 10.4 10.5 10.8 10.8 11.4 8.2.8-3.1-.2-13.7-1.5-16.1-.5-1-2-4.1-3.3-6.8-2.5-5.6-7.2-12.3-14.2-20.4-2.7-3.3-4.6-6.5-4.6-7.9 0-4.1-3.9-10.5-8.5-13.9-5.8-4.3-23.6-13.3-26.3-13.3-.5 0-2.3-.7-3.8-1.5-1.6-.8-3.7-1.5-4.7-1.5-.9 0-2.5-.4-3.5-.9-.9-.5-5.1-1.9-9.2-3.1-13.7-4.1-22.5-7.2-25.6-9.1-3.3-2-6.4-7.2-6.4-10.7 0-2.6 3.8-14.4 5-15.6.6-.6 1-1.7 1-2.5 0-.9.6-2.8 1.4-4.3.8-1.4 1.9-5.8 2.6-9.7 3.3-19.4-7.2-31.8-41-48.7-4.5-2.2-12.7-5.9-16.5-7.5-1.1-.4-4.1-1.7-6.7-2.8-2.6-1.2-5.4-2.1-6.2-2.1s-1.8-.5-2.1-1c-.3-.6-1.3-1-2.2-1-.8 0-2.9-.6-4.6-1.4-1.8-.8-10.4-3.8-19.2-6.6-8.8-2.9-16.7-5.6-17.6-6-.9-.5-3.4-1.2-5.5-1.6-2.2-.3-4.3-1-4.9-1.4-.5-.4-2.6-1.1-4.5-1.4-1.9-.4-4.4-1.1-5.5-1.6-1.1-.4-4-1.3-6.5-2-2.5-.6-6.3-1.6-8.5-2.1-2.2-.6-4.9-1.5-6-1.9-1.1-.5-3.6-1.2-5.5-1.6-1.9-.3-4.1-1-5-1.4-.8-.4-4.9-1.8-9-3s-8.2-2.5-9-2.9c-.9-.5-3.1-1.2-5-1.6s-3.9-1-4.5-1.4c-.5-.4-4.4-1.8-8.5-3.1-4.1-1.2-7.9-2.6-8.5-3-.5-.4-3.9-1.7-7.5-3s-6.9-2.7-7.4-3.2c-.6-.4-1.6-.8-2.4-.8-2 0-11.4-4.3-35.2-15.9-16.7-8.2-32.1-16.6-35.5-19.3-.5-.4-4.6-3.1-9-6s-8.4-5.6-9-6c-.5-.4-5.2-3.9-10.4-7.8-18.1-13.5-44.4-38.8-55.5-53.5-2.1-2.8-3.9-5.1-4-5.3-.2-.1-.5.1-.8.4zm447.2 303c10.2 3.4 13.5 6 15.9 12.1 2.4 5.9-1.6 7.3-6.5 2.2-1.6-1.7-4.5-4-6.4-5.2s-4.1-2.7-4.8-3.4-1.9-1.3-2.7-1.3c-1.3 0-2.5-2.1-2.5-4.6 
0-1.8 1.4-1.8 7 .2zm-519-240c0 1.1 8.5 17.9 10 19.7.6.7 2.7 3.4 4.7 6.2 7.3 9.8 18.7 21.5 33.9 34.5 3.8 3.3 14.2 11.1 17.5 13.2 1.4.9 3.2 2.3 4 3 .8.8 3.2 2.5 5.4 3.8s4.2 2.7 4.5 3c.6.8 30.1 18.3 39.5 23.5 7.4 4.2 15.4 8.2 43.5 21.9 16.5 8.1 19.6 9.7 31.7 17 9.1 5.5 23.7 16.9 31 24.2 4.1 4.1 7.6 7.4 7.8 7.4.3 0-.1-1.1-.7-2.5s-1.5-2.5-2-2.5c-.4 0-.8-.6-.8-1.3 0-.8-.9-2.5-2-3.8s-2.3-2.9-2.7-3.4c-7.3-9.6-13.3-15.4-31.7-31-2.5-2.2-19-13.4-26.7-18.2-6.1-3.9-18.4-10.8-30.9-17.5-3-1.7-5.9-3.4-6.5-3.8-.9-.7-5.2-3-19.5-10.8-9-4.8-31.8-18.9-35.5-21.9-.5-.5-2.8-2-5-3.3s-4.4-2.8-5-3.2c-.5-.4-5.9-4.4-12-8.9-6-4.5-11.2-8.5-11.5-8.8-.3-.4-2.7-2.4-5.5-4.5-5.6-4.2-12.8-10.8-26.2-24-5.1-5-9.3-8.6-9.3-8zm113.6 179.1c-1 1 15.8 16.6 26.9 24.9 5.5 4.1 10.5 7.8 11 8.2 2.6 2 11.6 7.2 12.4 7.2.5 0 1.6.6 2.3 1.2.7.7 2.9 2 4.8 3 13.3 6.3 19 8.8 20.4 8.8.8 0 1.7.4 2 .8.8 1.3 32.3 11.2 35.8 11.2 1 0 2.6.4 3.6 1 .9.5 3.7 1.4 6.2 1.9 8.7 1.9 13.5 3.1 15.5 4 1.1.5 5.4 1.9 9.5 3.2s7.9 2.6 8.5 3.1c.5.4 1.5.8 2.3.8s2.8.6 4.5 1.4c16.4 7.1 20.8 8.8 21.4 8.3.3-.4-.7-1.7-2.3-2.9-2.5-2-6.9-5.9-16.4-14.8-1.5-1.4-4.2-3.8-6-5.4-5-4.3-26-19.9-30.5-22.6-2.2-1.3-4.2-2.7-4.5-3-.3-.4-1.2-1-2-1.4s-4.2-2.2-7.5-4.1c-6.2-3.6-18.9-9.9-26-12.9-2.2-.9-4.7-2.1-5.5-2.5-.9-.5-3-1.2-4.8-1.5-1.7-.4-3.4-1.2-3.7-1.7-.4-.5-1.6-.9-2.8-.9-2.2.1-2.2.1-.2 1.2 1.1.6 2.2 1.4 2.5 1.8.3.3 2.5 1.8 5 3.3 5.3 3.1 15 11.7 15 13.3 0 .6-.7 1.7-1.5 2.4-1.2 1-4.1.9-14.5-.4-7.2-.9-14.1-2.1-15.3-2.6-1.2-.4-4.7-1.6-7.7-2.5-15.6-4.7-47-22.1-56.1-31-.9-.8-1.9-1.2-2.3-.8z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function SearchIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
@@ -2076,6 +2087,21 @@ export function BrandfetchIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function BrightDataIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='54 93 22 52' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M62 95.21c.19 2.16 1.85 3.24 2.82 4.74.25.38.48.11.67-.16.21-.31.6-1.21 1.15-1.28-.35 1.38-.04 3.15.16 4.45.49 3.05-1.22 5.64-4.07 6.18-3.38.65-6.22-2.21-5.6-5.62.23-1.24 1.37-2.5.77-3.7-.85-1.7.54-.52.79-.22 1.04 1.2 1.21.09 1.45-.55.24-.63.31-1.31.47-1.97.19-.77.55-1.4 1.39-1.87z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M66.70 123.37c0 3.69.04 7.38-.03 11.07-.02 1.04.31 1.48 1.32 1.49.29 0 .59.12.88.13.93.01 1.18.47 1.16 1.37-.05 2.19 0 2.19-2.24 2.19-3.48 0-6.96-.04-10.44.03-1.09.02-1.47-.33-1.3-1.36.02-.12.02-.26 0-.38-.28-1.39.39-1.96 1.7-1.9 1.36.06 1.76-.51 1.74-1.88-.09-5.17-.08-10.35 0-15.53.02-1.22-.32-1.87-1.52-2.17-.57-.14-1.47-.11-1.57-.85-.15-1.04-.05-2.11.01-3.17.02-.34.44-.35.73-.39 2.81-.39 5.63-.77 8.44-1.18.92-.14 1.15.2 1.14 1.09-.04 3.8-.02 7.62-.02 11.44z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function BrowserUseIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
@@ -3554,7 +3580,7 @@ export function FireworksIcon(props: SVGProps<SVGSVGElement>) {
|
||||
>
|
||||
<path
|
||||
d='M314.333 110.167L255.98 251.729l-58.416-141.562h-37.459l64 154.75c5.23 12.854 17.771 21.312 31.646 21.312s26.417-8.437 31.646-21.27l64.396-154.792h-37.459zm24.917 215.666L446 216.583l-14.562-34.77-116.584 119.562c-9.708 9.958-12.541 24.833-7.146 37.646 5.292 12.73 17.792 21.083 31.584 21.083l.042.063L506 359.75l-14.562-34.77-152.146.853h-.042zM66 216.5l14.563-34.77 116.583 119.562a34.592 34.592 0 017.146 37.646C199 351.667 186.5 360.02 172.708 360.02l-166.666-.375-.042.042 14.563-34.771 152.145.875L66 216.5z'
|
||||
fill='currentColor'
|
||||
fill='#5019c5'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
@@ -4614,6 +4640,42 @@ export function DynamoDBIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function IAMIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 80 80' xmlns='http://www.w3.org/2000/svg'>
|
||||
<defs>
|
||||
<linearGradient x1='0%' y1='100%' x2='100%' y2='0%' id='iamGradient'>
|
||||
<stop stopColor='#BD0816' offset='0%' />
|
||||
<stop stopColor='#FF5252' offset='100%' />
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<rect fill='url(#iamGradient)' width='80' height='80' />
|
||||
<path
|
||||
d='M14,59 L66,59 L66,21 L14,21 L14,59 Z M68,20 L68,60 C68,60.552 67.553,61 67,61 L13,61 C12.447,61 12,60.552 12,60 L12,20 C12,19.448 12.447,19 13,19 L67,19 C67.553,19 68,19.448 68,20 L68,20 Z M44,48 L59,48 L59,46 L44,46 L44,48 Z M57,42 L62,42 L62,40 L57,40 L57,42 Z M44,42 L52,42 L52,40 L44,40 L44,42 Z M29,46 C29,45.449 28.552,45 28,45 C27.448,45 27,45.449 27,46 C27,46.551 27.448,47 28,47 C28.552,47 29,46.551 29,46 L29,46 Z M31,46 C31,47.302 30.161,48.401 29,48.816 L29,51 L27,51 L27,48.815 C25.839,48.401 25,47.302 25,46 C25,44.346 26.346,43 28,43 C29.654,43 31,44.346 31,46 L31,46 Z M19,53.993 L36.994,54 L36.996,50 L33,50 L33,48 L36.996,48 L36.998,45 L33,45 L33,43 L36.999,43 L37,40.007 L19.006,40 L19,53.993 Z M22,38.001 L34,38.006 L34,31 C34.001,28.697 31.197,26.677 28,26.675 L27.996,26.675 C24.804,26.675 22.004,28.696 22.002,31 L22,38.001 Z M17,54.992 L17.006,39 C17.006,38.734 17.111,38.48 17.299,38.292 C17.486,38.105 17.741,38 18.006,38 L20,38.001 L20.002,31 C20.004,27.512 23.59,24.675 27.996,24.675 L28,24.675 C32.412,24.677 36.001,27.515 36,31 L36,38.007 L38,38.008 C38.553,38.008 39,38.456 39,39.008 L38.994,55 C38.994,55.266 38.889,55.52 38.701,55.708 C38.514,55.895 38.259,56 37.994,56 L18,55.992 C17.447,55.992 17,55.544 17,54.992 L17,54.992 Z M60,36 L62,36 L62,34 L60,34 L60,36 Z M44,36 L55,36 L55,34 L44,34 L44,36 Z'
|
||||
fill='#FFFFFF'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function STSIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 80 80' xmlns='http://www.w3.org/2000/svg'>
|
||||
<defs>
|
||||
<linearGradient x1='0%' y1='100%' x2='100%' y2='0%' id='stsGradient'>
|
||||
<stop stopColor='#BD0816' offset='0%' />
|
||||
<stop stopColor='#FF5252' offset='100%' />
|
||||
</linearGradient>
|
||||
</defs>
|
||||
<rect fill='url(#stsGradient)' width='80' height='80' />
|
||||
<path
|
||||
d='M14,59 L66,59 L66,21 L14,21 L14,59 Z M68,20 L68,60 C68,60.552 67.553,61 67,61 L13,61 C12.447,61 12,60.552 12,60 L12,20 C12,19.448 12.447,19 13,19 L67,19 C67.553,19 68,19.448 68,20 L68,20 Z M44,48 L59,48 L59,46 L44,46 L44,48 Z M57,42 L62,42 L62,40 L57,40 L57,42 Z M44,42 L52,42 L52,40 L44,40 L44,42 Z M29,46 C29,45.449 28.552,45 28,45 C27.448,45 27,45.449 27,46 C27,46.551 27.448,47 28,47 C28.552,47 29,46.551 29,46 L29,46 Z M31,46 C31,47.302 30.161,48.401 29,48.816 L29,51 L27,51 L27,48.815 C25.839,48.401 25,47.302 25,46 C25,44.346 26.346,43 28,43 C29.654,43 31,44.346 31,46 L31,46 Z M19,53.993 L36.994,54 L36.996,50 L33,50 L33,48 L36.996,48 L36.998,45 L33,45 L33,43 L36.999,43 L37,40.007 L19.006,40 L19,53.993 Z M22,38.001 L34,38.006 L34,31 C34.001,28.697 31.197,26.677 28,26.675 L27.996,26.675 C24.804,26.675 22.004,28.696 22.002,31 L22,38.001 Z M17,54.992 L17.006,39 C17.006,38.734 17.111,38.48 17.299,38.292 C17.486,38.105 17.741,38 18.006,38 L20,38.001 L20.002,31 C20.004,27.512 23.59,24.675 27.996,24.675 L28,24.675 C32.412,24.677 36.001,27.515 36,31 L36,38.007 L38,38.008 C38.553,38.008 39,38.456 39,39.008 L38.994,55 C38.994,55.266 38.889,55.52 38.701,55.708 C38.514,55.895 38.259,56 37.994,56 L18,55.992 C17.447,55.992 17,55.544 17,54.992 L17,54.992 Z M60,36 L62,36 L62,34 L60,34 L60,36 Z M44,36 L55,36 L55,34 L44,34 L44,36 Z'
|
||||
fill='#FFFFFF'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function SecretsManagerIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 80 80' xmlns='http://www.w3.org/2000/svg'>
|
||||
@@ -4824,6 +4886,17 @@ export function WordpressIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function AgiloftIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 47.3 47.2' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path d='M47.3,21.4H0v-4.3l4.3-4.2h43V21.4z' fill='#263A5C' />
|
||||
<path d='M47.3,8.6H8.6L17.2,0h30.1V8.6z' fill='#001028' />
|
||||
<path d='M0,25.7h47.3V30L43,34.4H0V25.7z' fill='#4A6587' />
|
||||
<path d='M0,38.7h38.8l-8.6,8.5H0V38.7z' fill='#6D8DAF' />
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function AhrefsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 1065 1300'>
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
interface StructuredDataProps {
|
||||
title: string
|
||||
description: string
|
||||
@@ -15,7 +17,7 @@ export function StructuredData({
|
||||
dateModified,
|
||||
breadcrumb,
|
||||
}: StructuredDataProps) {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
const articleStructuredData = {
|
||||
'@context': 'https://schema.org',
|
||||
@@ -70,10 +72,11 @@ export function StructuredData({
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'SoftwareApplication',
|
||||
name: 'Sim',
|
||||
applicationCategory: 'DeveloperApplication',
|
||||
applicationCategory: 'BusinessApplication',
|
||||
applicationSubCategory: 'AI Workspace',
|
||||
operatingSystem: 'Any',
|
||||
description:
|
||||
'Sim is the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows. Create agents, workflows, knowledge bases, tables, and docs.',
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.',
|
||||
url: baseUrl,
|
||||
author: {
|
||||
'@type': 'Organization',
|
||||
@@ -84,8 +87,9 @@ export function StructuredData({
|
||||
category: 'Developer Tools',
|
||||
},
|
||||
featureList: [
|
||||
'AI agent creation',
|
||||
'Agentic workflow orchestration',
|
||||
'AI workspace for teams',
|
||||
'Mothership — natural language agent creation',
|
||||
'Visual workflow builder',
|
||||
'1,000+ integrations',
|
||||
'LLM orchestration (OpenAI, Anthropic, Google, xAI, Mistral, Perplexity)',
|
||||
'Knowledge base creation',
|
||||
|
||||
@@ -6,6 +6,7 @@ import type { ComponentType, SVGProps } from 'react'
|
||||
import {
|
||||
A2AIcon,
|
||||
AgentMailIcon,
|
||||
AgiloftIcon,
|
||||
AhrefsIcon,
|
||||
AirtableIcon,
|
||||
AirweaveIcon,
|
||||
@@ -22,6 +23,7 @@ import {
|
||||
BoxCompanyIcon,
|
||||
BrainIcon,
|
||||
BrandfetchIcon,
|
||||
BrightDataIcon,
|
||||
BrowserUseIcon,
|
||||
CalComIcon,
|
||||
CalendlyIcon,
|
||||
@@ -32,6 +34,7 @@ import {
|
||||
CloudflareIcon,
|
||||
CloudWatchIcon,
|
||||
ConfluenceIcon,
|
||||
CrowdStrikeIcon,
|
||||
CursorIcon,
|
||||
DagsterIcon,
|
||||
DatabricksIcon,
|
||||
@@ -87,6 +90,7 @@ import {
|
||||
HubspotIcon,
|
||||
HuggingFaceIcon,
|
||||
HunterIOIcon,
|
||||
IAMIcon,
|
||||
ImageIcon,
|
||||
IncidentioIcon,
|
||||
InfisicalIcon,
|
||||
@@ -161,6 +165,7 @@ import {
|
||||
SmtpIcon,
|
||||
SQSIcon,
|
||||
SshIcon,
|
||||
STSIcon,
|
||||
STTIcon,
|
||||
StagehandIcon,
|
||||
StripeIcon,
|
||||
@@ -196,6 +201,7 @@ type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
|
||||
export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
a2a: A2AIcon,
|
||||
agentmail: AgentMailIcon,
|
||||
agiloft: AgiloftIcon,
|
||||
ahrefs: AhrefsIcon,
|
||||
airtable: AirtableIcon,
|
||||
airweave: AirweaveIcon,
|
||||
@@ -210,6 +216,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
attio: AttioIcon,
|
||||
box: BoxCompanyIcon,
|
||||
brandfetch: BrandfetchIcon,
|
||||
brightdata: BrightDataIcon,
|
||||
browser_use: BrowserUseIcon,
|
||||
calcom: CalComIcon,
|
||||
calendly: CalendlyIcon,
|
||||
@@ -220,6 +227,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
cloudformation: CloudFormationIcon,
|
||||
cloudwatch: CloudWatchIcon,
|
||||
confluence_v2: ConfluenceIcon,
|
||||
crowdstrike: CrowdStrikeIcon,
|
||||
cursor_v2: CursorIcon,
|
||||
dagster: DagsterIcon,
|
||||
databricks: DatabricksIcon,
|
||||
@@ -274,6 +282,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
hubspot: HubspotIcon,
|
||||
huggingface: HuggingFaceIcon,
|
||||
hunter: HunterIOIcon,
|
||||
iam: IAMIcon,
|
||||
image_generator: ImageIcon,
|
||||
imap: MailServerIcon,
|
||||
incidentio: IncidentioIcon,
|
||||
@@ -352,6 +361,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
ssh: SshIcon,
|
||||
stagehand: StagehandIcon,
|
||||
stripe: StripeIcon,
|
||||
sts: STSIcon,
|
||||
stt_v2: STTIcon,
|
||||
supabase: SupabaseIcon,
|
||||
tailscale: TailscaleIcon,
|
||||
|
||||
@@ -21,7 +21,17 @@ Verwenden Sie Ihre eigenen API-Schlüssel für KI-Modellanbieter anstelle der ge
|
||||
| OpenAI | Knowledge Base-Embeddings, Agent-Block |
|
||||
| Anthropic | Agent-Block |
|
||||
| Google | Agent-Block |
|
||||
| Mistral | Knowledge Base OCR |
|
||||
| Mistral | Knowledge Base OCR, Agent-Block |
|
||||
| Fireworks | Agent-Block |
|
||||
| Firecrawl | Web-Scraping, Crawling, Suche und Extraktion |
|
||||
| Exa | KI-gestützte Suche und Recherche |
|
||||
| Serper | Google-Such-API |
|
||||
| Linkup | Websuche und Inhaltsabruf |
|
||||
| Parallel AI | Websuche, Extraktion und tiefgehende Recherche |
|
||||
| Perplexity | KI-gestützter Chat und Websuche |
|
||||
| Jina AI | Web-Lesen und Suche |
|
||||
| Google Cloud | Translate, Maps, PageSpeed und Books APIs |
|
||||
| Brandfetch | Marken-Assets, Logos, Farben und Unternehmensinformationen |
|
||||
|
||||
### Einrichtung
|
||||
|
||||
|
||||
@@ -105,9 +105,108 @@ Die Modellaufschlüsselung zeigt:
|
||||
Die angezeigten Preise entsprechen den Tarifen vom 10. September 2025. Überprüfen Sie die Dokumentation der Anbieter für aktuelle Preise.
|
||||
</Callout>
|
||||
|
||||
## Gehostete Tool-Preise
|
||||
|
||||
Wenn Workflows Tool-Blöcke mit den gehosteten API-Schlüsseln von Sim verwenden, werden die Kosten pro Operation berechnet. Verwenden Sie Ihre eigenen Schlüssel über BYOK, um direkt an die Anbieter zu zahlen.
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - Web-Scraping, Crawling, Suche und Extraktion
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - KI-gestützte Suche und Recherche
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - Google-Such-API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - KI-gestützter Chat und Websuche
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - Websuche und Inhaltsabruf
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - Websuche, Extraktion und tiefgehende Recherche
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - Web-Lesen und Suche
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - Translate, Maps, PageSpeed und Books APIs
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - Marken-Assets, Logos, Farben und Unternehmensinformationen
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Bring Your Own Key (BYOK)
|
||||
|
||||
Sie können Ihre eigenen API-Schlüssel für gehostete Modelle (OpenAI, Anthropic, Google, Mistral) unter **Einstellungen → BYOK** verwenden, um Basispreise zu zahlen. Schlüssel werden verschlüsselt und gelten arbeitsbereichsweit.
|
||||
Sie können Ihre eigenen API-Schlüssel für unterstützte Anbieter (OpenAI, Anthropic, Google, Mistral, Fireworks, Firecrawl, Exa, Serper, Linkup, Parallel AI, Perplexity, Jina AI, Google Cloud, Brandfetch) unter **Einstellungen → BYOK** verwenden, um Basispreise zu zahlen. Schlüssel werden verschlüsselt und gelten arbeitsbereichsweit.
|
||||
|
||||
## Strategien zur Kostenoptimierung
|
||||
|
||||
|
||||
@@ -0,0 +1,9 @@
|
||||
{
|
||||
"pages": [
|
||||
"listPausedExecutions",
|
||||
"getPausedExecution",
|
||||
"getPausedExecutionByResumePath",
|
||||
"getPauseContext",
|
||||
"resumeExecution"
|
||||
]
|
||||
}
|
||||
@@ -10,6 +10,7 @@
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/human-in-the-loop",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs",
|
||||
|
||||
@@ -78,7 +78,7 @@ Defines the fields approvers fill in when responding. This data becomes availabl
|
||||
}
|
||||
```
|
||||
|
||||
Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
|
||||
Access resume data in downstream blocks using `<blockId.fieldName>`.
|
||||
|
||||
## Approval Methods
|
||||
|
||||
@@ -93,11 +93,12 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
|
||||
<Tab>
|
||||
### REST API
|
||||
|
||||
Programmatically resume workflows using the resume endpoint. The `contextId` is available from the block's `resumeEndpoint` output or from the paused execution detail.
|
||||
Programmatically resume workflows using the resume endpoint. The `contextId` is available from the block's `resumeEndpoint` output or from the `_resume` object in the paused execution response.
|
||||
|
||||
```bash
|
||||
POST /api/resume/{workflowId}/{executionId}/{contextId}
|
||||
Content-Type: application/json
|
||||
X-API-Key: your-api-key
|
||||
|
||||
{
|
||||
"input": {
|
||||
@@ -107,23 +108,56 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
|
||||
}
|
||||
```
|
||||
|
||||
The response includes a new `executionId` for the resumed execution:
|
||||
The resume endpoint automatically respects the execution mode used in the original execute call:
|
||||
|
||||
- **Sync mode** (default) — The response waits for the remaining workflow to complete and returns the full result:
|
||||
|
||||
```json
|
||||
{
|
||||
"status": "started",
|
||||
"success": true,
|
||||
"status": "completed",
|
||||
"executionId": "<resumeExecutionId>",
|
||||
"message": "Resume execution started."
|
||||
"output": { ... },
|
||||
"metadata": { "duration": 1234, "startTime": "...", "endTime": "..." }
|
||||
}
|
||||
```
|
||||
|
||||
To poll execution progress after resuming, connect to the SSE stream:
|
||||
If the resumed workflow hits another HITL block, the response returns `"status": "paused"` with new `_resume` URLs in the output.
|
||||
|
||||
```bash
|
||||
GET /api/workflows/{workflowId}/executions/{resumeExecutionId}/stream
|
||||
- **Stream mode** (`stream: true` on the original execute call) — The resume response streams SSE events with `selectedOutputs` chunks, just like the initial execution.
|
||||
|
||||
- **Async mode** (`X-Execution-Mode: async` on the original execute call) — The resume dispatches execution to a background worker and returns immediately with `202`, including a `jobId` and `statusUrl` for polling:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"async": true,
|
||||
"jobId": "<jobId>",
|
||||
"executionId": "<resumeExecutionId>",
|
||||
"message": "Resume execution queued",
|
||||
"statusUrl": "/api/jobs/<jobId>"
|
||||
}
|
||||
```
|
||||
|
||||
Build custom approval UIs or integrate with existing systems.
|
||||
#### Polling execution status
|
||||
|
||||
Poll the `statusUrl` from the async response to check when the resume completes:
|
||||
|
||||
```bash
|
||||
GET /api/jobs/{jobId}
|
||||
X-API-Key: your-api-key
|
||||
```
|
||||
|
||||
Returns job status and, when completed, the full workflow output.
|
||||
|
||||
To check on a paused execution's pause points and resume links:
|
||||
|
||||
```bash
|
||||
GET /api/resume/{workflowId}/{executionId}
|
||||
X-API-Key: your-api-key
|
||||
```
|
||||
|
||||
Returns the paused execution detail with all pause points, their statuses, and resume links. Returns `404` when the execution has completed and is no longer paused.
|
||||
</Tab>
|
||||
<Tab>
|
||||
### Webhook
|
||||
@@ -132,6 +166,53 @@ Access resume data in downstream blocks using `<blockId.resumeInput.fieldName>`.
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## API Execute Behavior
|
||||
|
||||
When triggering a workflow via the execute API (`POST /api/workflows/{id}/execute`), HITL blocks cause the execution to pause and return the `_resume` data in the response:
|
||||
|
||||
<Tabs items={['Sync (JSON)', 'Stream (SSE)', 'Async']}>
|
||||
<Tab>
|
||||
The response includes the full pause data with resume URLs:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"executionId": "<executionId>",
|
||||
"output": {
|
||||
"data": {
|
||||
"operation": "human",
|
||||
"_resume": {
|
||||
"apiUrl": "/api/resume/{workflowId}/{executionId}/{contextId}",
|
||||
"uiUrl": "/resume/{workflowId}/{executionId}",
|
||||
"contextId": "<contextId>",
|
||||
"executionId": "<executionId>",
|
||||
"workflowId": "<workflowId>"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
</Tab>
|
||||
<Tab>
|
||||
Blocks before the HITL stream their `selectedOutputs` normally. When execution pauses, the final SSE event includes `status: "paused"` and the `_resume` data:
|
||||
|
||||
```
|
||||
data: {"blockId":"agent1","chunk":"streamed content..."}
|
||||
data: {"event":"final","data":{"success":true,"output":{...,"_resume":{...}},"status":"paused"}}
|
||||
data: "[DONE]"
|
||||
```
|
||||
|
||||
On resume, blocks after the HITL stream their `selectedOutputs` the same way.
|
||||
|
||||
<Callout type="info">
|
||||
HITL blocks are automatically excluded from the `selectedOutputs` dropdown since their data is always included in the pause response.
|
||||
</Callout>
|
||||
</Tab>
|
||||
<Tab>
|
||||
Returns `202` immediately. Use the polling endpoint to check when the execution pauses.
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
**Content Approval** - Review AI-generated content before publishing
|
||||
@@ -161,9 +242,9 @@ Agent (Generate) → Human in the Loop (QA) → Gmail (Send)
|
||||
**`response`** - Display data shown to the approver (json)
|
||||
**`submission`** - Form submission data from the approver (json)
|
||||
**`submittedAt`** - ISO timestamp when the workflow was resumed
|
||||
**`resumeInput.*`** - All fields defined in Resume Form become available after the workflow resumes
|
||||
**`<fieldName>`** - All fields defined in Resume Form become available at the top level after the workflow resumes
|
||||
|
||||
Access using `<blockId.resumeInput.fieldName>`.
|
||||
Access using `<blockId.fieldName>`.
|
||||
|
||||
## Example
|
||||
|
||||
@@ -187,7 +268,7 @@ Access using `<blockId.resumeInput.fieldName>`.
|
||||
**Downstream Usage:**
|
||||
```javascript
|
||||
// Condition block
|
||||
<approval1.resumeInput.approved> === true
|
||||
<approval1.approved> === true
|
||||
```
|
||||
The example below shows an approval portal as seen by an approver after the workflow is paused. Approvers can review the data and provide inputs as a part of the workflow resumption. The approval portal can be accessed directly via the unique URL, `<blockId.url>`.
|
||||
|
||||
@@ -204,7 +285,7 @@ The example below shows an approval portal as seen by an approver after the work
|
||||
<FAQ items={[
|
||||
{ question: "How long does the workflow stay paused?", answer: "The workflow pauses indefinitely until a human provides input through the approval portal, the REST API, or a webhook. There is no automatic timeout — it will wait until someone responds." },
|
||||
{ question: "What notification channels can I use to alert approvers?", answer: "You can configure notifications through Slack, Gmail, Microsoft Teams, SMS (via Twilio), or custom webhooks. Include the approval URL in your notification message so approvers can access the portal directly." },
|
||||
{ question: "How do I access the approver's input in downstream blocks?", answer: "Use the syntax <blockId.resumeInput.fieldName> to reference specific fields from the resume form. For example, if your block ID is 'approval1' and the form has an 'approved' field, use <approval1.resumeInput.approved>." },
|
||||
{ question: "How do I access the approver's input in downstream blocks?", answer: "Use the syntax <blockId.fieldName> to reference specific fields from the resume form. For example, if your block name is 'approval1' and the form has an 'approved' field, use <approval1.approved>." },
|
||||
{ question: "Can I chain multiple Human in the Loop blocks for multi-stage approvals?", answer: "Yes. You can place multiple Human in the Loop blocks in sequence to create multi-stage approval workflows. Each block pauses independently and can have its own notification configuration and resume form fields." },
|
||||
{ question: "Can I resume the workflow programmatically without the portal?", answer: "Yes. Each block exposes a resume API endpoint that you can call with a POST request containing the form data as JSON. This lets you build custom approval UIs or integrate with existing systems like Jira or ServiceNow." },
|
||||
{ question: "What outputs are available after the workflow resumes?", answer: "The block outputs include the approval portal URL, the resume API endpoint URL, the display data shown to the approver, the form submission data, the raw resume input, and an ISO timestamp of when the workflow was resumed." },
|
||||
|
||||
@@ -69,6 +69,9 @@ For self-hosted deployments, enterprise features can be enabled via environment
|
||||
| `ACCESS_CONTROL_ENABLED`, `NEXT_PUBLIC_ACCESS_CONTROL_ENABLED` | Permission groups for access restrictions |
|
||||
| `SSO_ENABLED`, `NEXT_PUBLIC_SSO_ENABLED` | Single Sign-On with SAML/OIDC |
|
||||
| `CREDENTIAL_SETS_ENABLED`, `NEXT_PUBLIC_CREDENTIAL_SETS_ENABLED` | Polling Groups for email triggers |
|
||||
| `INBOX_ENABLED`, `NEXT_PUBLIC_INBOX_ENABLED` | Sim Mailer inbox for outbound email |
|
||||
| `WHITELABELING_ENABLED`, `NEXT_PUBLIC_WHITELABELING_ENABLED` | Custom branding and white-labeling |
|
||||
| `AUDIT_LOGS_ENABLED`, `NEXT_PUBLIC_AUDIT_LOGS_ENABLED` | Audit logging for compliance and monitoring |
|
||||
| `DISABLE_INVITATIONS`, `NEXT_PUBLIC_DISABLE_INVITATIONS` | Globally disable workspace/organization invitations |
|
||||
|
||||
### Organization Management
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Video } from '@/components/ui/video'
|
||||
|
||||
Sim provides a comprehensive external API for querying workflow execution logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
Sim provides a comprehensive external API for querying workflow run logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
|
||||
## Authentication
|
||||
|
||||
@@ -21,7 +21,7 @@ You can generate API keys from the Sim platform and navigate to **Settings**, th
|
||||
|
||||
## Logs API
|
||||
|
||||
All API responses include information about your workflow execution limits and usage:
|
||||
All API responses include information about your workflow run limits and usage:
|
||||
|
||||
```json
|
||||
"limits": {
|
||||
@@ -48,11 +48,11 @@ All API responses include information about your workflow execution limits and u
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow executions. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow runs. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
|
||||
### Query Logs
|
||||
|
||||
Query workflow execution logs with extensive filtering options.
|
||||
Query workflow run logs with extensive filtering options.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -70,11 +70,11 @@ Query workflow execution logs with extensive filtering options.
|
||||
- `level` - Filter by level: `info`, `error`
|
||||
- `startDate` - ISO timestamp for date range start
|
||||
- `endDate` - ISO timestamp for date range end
|
||||
- `executionId` - Exact execution ID match
|
||||
- `minDurationMs` - Minimum execution duration in milliseconds
|
||||
- `maxDurationMs` - Maximum execution duration in milliseconds
|
||||
- `minCost` - Minimum execution cost
|
||||
- `maxCost` - Maximum execution cost
|
||||
- `executionId` - Exact run ID match
|
||||
- `minDurationMs` - Minimum run duration in milliseconds
|
||||
- `maxDurationMs` - Maximum run duration in milliseconds
|
||||
- `minCost` - Minimum run cost
|
||||
- `maxCost` - Maximum run cost
|
||||
- `model` - Filter by AI model used
|
||||
|
||||
**Pagination:**
|
||||
@@ -213,9 +213,9 @@ Retrieve detailed information about a specific log entry.
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Get Execution Details
|
||||
### Get Run Details
|
||||
|
||||
Retrieve execution details including the workflow state snapshot.
|
||||
Retrieve run details including the workflow state snapshot.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -248,7 +248,7 @@ Retrieve execution details including the workflow state snapshot.
|
||||
|
||||
## Notifications
|
||||
|
||||
Get real-time notifications when workflow executions complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
Get real-time notifications when workflow runs complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -256,7 +256,7 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Notification Channels:**
|
||||
- **Webhook**: Send HTTP POST requests to your endpoint
|
||||
- **Email**: Receive email notifications with execution details
|
||||
- **Email**: Receive email notifications with run details
|
||||
- **Slack**: Post messages to a Slack channel
|
||||
|
||||
**Workflow Selection:**
|
||||
@@ -269,38 +269,38 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Optional Data:**
|
||||
- `includeFinalOutput`: Include the workflow's final output
|
||||
- `includeTraceSpans`: Include detailed execution trace spans
|
||||
- `includeTraceSpans`: Include detailed trace spans
|
||||
- `includeRateLimits`: Include rate limit information (sync/async limits and remaining)
|
||||
- `includeUsageData`: Include billing period usage and limits
|
||||
|
||||
### Alert Rules
|
||||
|
||||
Instead of receiving notifications for every execution, configure alert rules to be notified only when issues are detected:
|
||||
Instead of receiving notifications for every run, configure alert rules to be notified only when issues are detected:
|
||||
|
||||
**Consecutive Failures**
|
||||
- Alert after X consecutive failed executions (e.g., 3 failures in a row)
|
||||
- Resets when an execution succeeds
|
||||
- Alert after X consecutive failed runs (e.g., 3 failures in a row)
|
||||
- Resets when a run succeeds
|
||||
|
||||
**Failure Rate**
|
||||
- Alert when failure rate exceeds X% over the last Y hours
|
||||
- Requires minimum 5 executions in the window
|
||||
- Requires minimum 5 runs in the window
|
||||
- Only triggers after the full time window has elapsed
|
||||
|
||||
**Latency Threshold**
|
||||
- Alert when any execution takes longer than X seconds
|
||||
- Alert when any run takes longer than X seconds
|
||||
- Useful for catching slow or hanging workflows
|
||||
|
||||
**Latency Spike**
|
||||
- Alert when execution is X% slower than the average
|
||||
- Alert when a run is X% slower than the average
|
||||
- Compares against the average duration over the configured time window
|
||||
- Requires minimum 5 executions to establish baseline
|
||||
- Requires minimum 5 runs to establish baseline
|
||||
|
||||
**Cost Threshold**
|
||||
- Alert when a single execution costs more than $X
|
||||
- Alert when a single run costs more than $X
|
||||
- Useful for catching expensive LLM calls
|
||||
|
||||
**No Activity**
|
||||
- Alert when no executions occur within X hours
|
||||
- Alert when no runs occur within X hours
|
||||
- Useful for monitoring scheduled workflows that should run regularly
|
||||
|
||||
**Error Count**
|
||||
@@ -317,7 +317,7 @@ For webhooks, additional options are available:
|
||||
|
||||
### Payload Structure
|
||||
|
||||
When a workflow execution completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
When a workflow run completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -456,7 +456,7 @@ Failed webhook deliveries are retried with exponential backoff and jitter:
|
||||
- Deliveries timeout after 30 seconds
|
||||
|
||||
<Callout type="info">
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow execution performance.
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow run performance.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -596,11 +596,11 @@ app.listen(3000, () => {
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How do I trigger async execution via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "How do I trigger an async run via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "What authentication methods does the API support?", answer: "The API supports two authentication methods: API keys passed in the x-api-key header, and session-based authentication for logged-in users. API keys can be generated from Settings > Sim Keys in the platform. Workflows with public API access enabled can also be called without authentication." },
|
||||
{ question: "How does the webhook retry policy work?", answer: "Failed webhook deliveries are retried up to 5 times with exponential backoff: 5 seconds, 15 seconds, 1 minute, 3 minutes, and 10 minutes, plus up to 10% jitter. Only HTTP 5xx and 429 responses trigger retries. Each delivery times out after 30 seconds." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow execution rate limits, which are shown in the response body." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow run rate limits, which are shown in the response body." },
|
||||
{ question: "How do I verify that a webhook is from Sim?", answer: "Configure a webhook secret when setting up notifications. Sim signs each delivery with HMAC-SHA256 using the format 't={timestamp},v1={signature}' in the sim-signature header. Compute the HMAC of '{timestamp}.{body}' with your secret and compare it to the signature value." },
|
||||
{ question: "What alert rules are available for notifications?", answer: "You can configure alerts for consecutive failures, failure rate thresholds, latency thresholds, latency spikes (percentage above average), cost thresholds, no-activity periods, and error counts within a time window. All alert types include a 1-hour cooldown to prevent notification spam." },
|
||||
{ question: "Can I filter which executions trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
{ question: "Can I filter which runs trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Understanding how workflows execute in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
Understanding how workflows run in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
|
||||
## How Workflows Execute
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim's execution engine processes workflows intelligently by analyzing dependenci
|
||||
|
||||
### Concurrent Execution by Default
|
||||
|
||||
Multiple blocks run concurrently when they don't depend on each other. This parallel execution dramatically improves performance without requiring manual configuration.
|
||||
Multiple blocks run concurrently when they don't depend on each other. This dramatically improves performance without requiring manual configuration.
|
||||
|
||||
<Image
|
||||
src="/static/execution/concurrency.png"
|
||||
@@ -49,7 +49,7 @@ Workflows can branch in multiple directions using routing blocks. The execution
|
||||
height={500}
|
||||
/>
|
||||
|
||||
This workflow demonstrates how execution can follow different paths based on conditions or AI decisions, with each path executing independently.
|
||||
This workflow demonstrates how a run can follow different paths based on conditions or AI decisions, with each path running independently.
|
||||
|
||||
## Block Types
|
||||
|
||||
@@ -57,7 +57,7 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
|
||||
<Cards>
|
||||
<Card title="Triggers" href="/triggers">
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin execution.
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin a run.
|
||||
</Card>
|
||||
|
||||
<Card title="Processing Blocks" href="/blocks">
|
||||
@@ -73,37 +73,37 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
All blocks execute automatically based on their dependencies - you don't need to manually manage execution order or timing.
|
||||
All blocks run automatically based on their dependencies - you don't need to manually manage run order or timing.
|
||||
|
||||
## Execution Monitoring
|
||||
## Run Monitoring
|
||||
|
||||
When workflows run, Sim provides real-time visibility into the execution process:
|
||||
When workflows run, Sim provides real-time visibility into the process:
|
||||
|
||||
- **Live Block States**: See which blocks are currently executing, completed, or failed
|
||||
- **Execution Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track execution time and costs for each block
|
||||
- **Path Visualization**: Understand which execution paths were taken through your workflow
|
||||
- **Live Block States**: See which blocks are currently running, completed, or failed
|
||||
- **Run Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track run time and costs for each block
|
||||
- **Path Visualization**: Understand which paths were taken through your workflow
|
||||
|
||||
<Callout type="info">
|
||||
All execution details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
All run details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
</Callout>
|
||||
|
||||
## Key Execution Principles
|
||||
## Key Principles
|
||||
|
||||
Understanding these core principles will help you build better workflows:
|
||||
|
||||
1. **Dependency-Based Execution**: Blocks only run when all their dependencies have completed
|
||||
2. **Automatic Parallelization**: Independent blocks run concurrently without configuration
|
||||
3. **Smart Data Flow**: Outputs flow automatically to connected blocks
|
||||
4. **Error Handling**: Failed blocks stop their execution path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block executes, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to execute wins
|
||||
6. **State Persistence**: All block outputs and execution details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops
|
||||
4. **Error Handling**: Failed blocks stop their run path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block runs, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to run wins
|
||||
6. **State Persistence**: All block outputs and run details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, the run is stopped to prevent infinite loops
|
||||
|
||||
## Next Steps
|
||||
|
||||
Now that you understand execution basics, explore:
|
||||
- **[Block Types](/blocks)** - Learn about specific block capabilities
|
||||
- **[Logging](/execution/logging)** - Monitor workflow executions and debug issues
|
||||
- **[Logging](/execution/logging)** - Monitor workflow runs and debug issues
|
||||
- **[Cost Calculation](/execution/costs)** - Understand and optimize workflow costs
|
||||
- **[Triggers](/triggers)** - Set up different ways to run your workflows
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim automatically calculates costs for all workflow executions, providing transparent pricing based on AI model usage and execution charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
Sim automatically calculates costs for all workflow runs, providing transparent pricing based on AI model usage and run charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
|
||||
## Credits
|
||||
|
||||
@@ -16,18 +16,18 @@ All plan limits, usage meters, and billing thresholds are displayed in credits t
|
||||
|
||||
## How Costs Are Calculated
|
||||
|
||||
Every workflow execution includes two cost components:
|
||||
Every workflow run includes two cost components:
|
||||
|
||||
**Base Execution Charge**: 1 credit ($0.005) per execution
|
||||
**Base Run Charge**: 1 credit ($0.005) per run
|
||||
|
||||
**AI Model Usage**: Variable cost based on token consumption
|
||||
```javascript
|
||||
modelCost = (inputTokens × inputPrice + outputTokens × outputPrice) / 1,000,000
|
||||
totalCredits = baseExecutionCharge + modelCost × 200
|
||||
totalCredits = baseRunCharge + modelCost × 200
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base execution charge.
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base run charge.
|
||||
</Callout>
|
||||
|
||||
## Model Breakdown in Logs
|
||||
@@ -48,7 +48,7 @@ The model breakdown shows:
|
||||
- **Token Usage**: Input and output token counts for each model
|
||||
- **Cost Breakdown**: Individual costs per model and operation
|
||||
- **Model Distribution**: Which models were used and how many times
|
||||
- **Total Cost**: Aggregate cost for the entire workflow execution
|
||||
- **Total Cost**: Aggregate cost for the entire workflow run
|
||||
|
||||
## Pricing Options
|
||||
|
||||
@@ -110,9 +110,108 @@ The model breakdown shows:
|
||||
Pricing shown reflects rates as of September 10, 2025. Check provider documentation for current pricing.
|
||||
</Callout>
|
||||
|
||||
## Hosted Tool Pricing
|
||||
|
||||
When workflows use tool blocks with Sim's hosted API keys, costs are charged per operation. Use your own keys via BYOK to pay providers directly instead.
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - Web scraping, crawling, search, and extraction
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - AI-powered search and research
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - Google search API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - AI-powered chat and web search
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - Web search and content retrieval
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - Web search, extraction, and deep research
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - Web reading and search
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - Translate, Maps, PageSpeed, and Books APIs
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - Brand assets, logos, colors, and company info
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Bring Your Own Key (BYOK)
|
||||
|
||||
Use your own API keys for AI model providers instead of Sim's hosted keys to pay base prices with no markup.
|
||||
Use your own API keys for supported providers instead of Sim's hosted keys to pay base prices with no markup.
|
||||
|
||||
### Supported Providers
|
||||
|
||||
@@ -121,7 +220,17 @@ Use your own API keys for AI model providers instead of Sim's hosted keys to pay
|
||||
| OpenAI | Knowledge Base embeddings, Agent block |
|
||||
| Anthropic | Agent block |
|
||||
| Google | Agent block |
|
||||
| Mistral | Knowledge Base OCR |
|
||||
| Mistral | Knowledge Base OCR, Agent block |
|
||||
| Fireworks | Agent block |
|
||||
| Firecrawl | Web scraping, crawling, search, and extraction |
|
||||
| Exa | AI-powered search and research |
|
||||
| Serper | Google search API |
|
||||
| Linkup | Web search and content retrieval |
|
||||
| Parallel AI | Web search, extraction, and deep research |
|
||||
| Perplexity | AI-powered chat and web search |
|
||||
| Jina AI | Web reading and search |
|
||||
| Google Cloud | Translate, Maps, PageSpeed, and Books APIs |
|
||||
| Brandfetch | Brand assets, logos, colors, and company info |
|
||||
|
||||
### Setup
|
||||
|
||||
@@ -152,20 +261,20 @@ Each voice session is billed when it starts. In deployed chat voice mode, each c
|
||||
|
||||
## Plans
|
||||
|
||||
Sim has two paid plan tiers — **Pro** and **Max**. Either can be used individually or with a team. Team plans pool credits across all seats in the organization.
|
||||
Sim has two paid plan tiers - **Pro** and **Max**. Either can be used individually or with a team. Team plans pool credits across all seats in the organization.
|
||||
|
||||
| Plan | Price | Credits Included | Daily Refresh |
|
||||
|------|-------|------------------|---------------|
|
||||
| **Community** | $0 | 1,000 (one-time) | — |
|
||||
| **Community** | $0 | 1,000 (one-time) | - |
|
||||
| **Pro** | $25/mo | 6,000/mo | +50/day |
|
||||
| **Max** | $100/mo | 25,000/mo | +200/day |
|
||||
| **Enterprise** | Custom | Custom | — |
|
||||
| **Enterprise** | Custom | Custom | - |
|
||||
|
||||
To use Pro or Max with a team, select **Get For Team** in subscription settings and choose the tier and number of seats. Credits are pooled across the organization at the per-seat rate (e.g. Max for Teams with 3 seats = 75,000 credits/mo pooled).
|
||||
|
||||
### Daily Refresh Credits
|
||||
|
||||
Paid plans include a small daily credit allowance that does not count toward your plan limit. Each day, usage up to the daily refresh amount is excluded from billable usage. This allowance resets every 24 hours and does not carry over — use it or lose it.
|
||||
Paid plans include a small daily credit allowance that does not count toward your plan limit. Each day, usage up to the daily refresh amount is excluded from billable usage. This allowance resets every 24 hours and does not carry over - use it or lose it.
|
||||
|
||||
| Plan | Daily Refresh |
|
||||
|------|---------------|
|
||||
@@ -210,17 +319,6 @@ By default, your usage is capped at the credits included in your plan. To allow
|
||||
|
||||
Max (individual) shares the same rate limits as team plans. Team plans (Pro or Max for Teams) use the Max-tier rate limits.
|
||||
|
||||
### Concurrent Execution Limits
|
||||
|
||||
| Plan | Concurrent Executions |
|
||||
|------|----------------------|
|
||||
| **Free** | 5 |
|
||||
| **Pro** | 50 |
|
||||
| **Max / Team** | 200 |
|
||||
| **Enterprise** | 200 (customizable) |
|
||||
|
||||
Concurrent execution limits control how many workflow executions can run simultaneously within a workspace. When the limit is reached, new executions are queued and admitted as running executions complete. Manual runs from the editor are not subject to these limits.
|
||||
|
||||
### File Storage
|
||||
|
||||
| Plan | Storage |
|
||||
@@ -232,18 +330,18 @@ Concurrent execution limits control how many workflow executions can run simulta
|
||||
|
||||
Team plans (Pro or Max for Teams) use 500 GB.
|
||||
|
||||
### Execution Time Limits
|
||||
### Run Time Limits
|
||||
|
||||
| Plan | Sync | Async |
|
||||
|------|------|-------|
|
||||
| **Free** | 5 minutes | 90 minutes |
|
||||
| **Pro / Max / Team / Enterprise** | 50 minutes | 90 minutes |
|
||||
|
||||
**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
**Sync runs** complete immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async runs** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
|
||||
<Callout type="info">
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows.
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async runs or break them into smaller workflows.
|
||||
</Callout>
|
||||
|
||||
## Billing Model
|
||||
@@ -252,7 +350,7 @@ Sim uses a **base subscription + overage** billing model:
|
||||
|
||||
### How It Works
|
||||
|
||||
**Pro Plan ($25/month — 6,000 credits):**
|
||||
**Pro Plan ($25/month - 6,000 credits):**
|
||||
- Monthly subscription includes 6,000 credits of usage
|
||||
- Usage under 6,000 credits → No additional charges
|
||||
- Usage over 6,000 credits (with on-demand enabled) → Pay the overage at month end
|
||||
@@ -354,18 +452,18 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
|
||||
## Next Steps
|
||||
|
||||
- Review your current usage in [Settings → Subscription](https://sim.ai/settings/subscription)
|
||||
- Learn about [Logging](/execution/logging) to track execution details
|
||||
- Learn about [Logging](/execution/logging) to track run details
|
||||
- Explore the [External API](/execution/api) for programmatic cost monitoring
|
||||
- Check out [workflow optimization techniques](/blocks) to reduce costs
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How much does a single workflow execution cost?", answer: "Every execution incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base execution charge." },
|
||||
{ question: "How much does a single workflow run cost?", answer: "Every run incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base run charge." },
|
||||
{ question: "What is the credit-to-dollar conversion rate?", answer: "1 credit equals $0.005. All plan limits, usage meters, and billing thresholds in the Sim UI are displayed in credits." },
|
||||
{ question: "Do unused daily refresh credits carry over?", answer: "No. Daily refresh credits reset every 24 hours and do not accumulate. If you do not use them within the day, they are lost." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and executions will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and runs will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "How does the 1.1x hosted model multiplier work?", answer: "When you use Sim's hosted API keys (instead of bringing your own), a 1.1x multiplier is applied to the base model pricing for Agent blocks. This covers infrastructure and API management costs. You can avoid this multiplier by using your own API keys via the BYOK feature." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or VLLM, there are no API costs for those model calls. You still pay the base execution charge of 1 credit per execution." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or VLLM, there are no API costs for those model calls. You still pay the base run charge of 1 credit per run." },
|
||||
{ question: "When does threshold billing trigger?", answer: "When on-demand billing is enabled and your unbilled overage reaches $50, Sim automatically bills the full unbilled amount. This spreads large charges throughout the month instead of accumulating one large bill at period end." },
|
||||
]} />
|
||||
|
||||
@@ -156,7 +156,7 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
- **Dropbox** - Dropbox file operations
|
||||
|
||||
<Callout type="info">
|
||||
Files are automatically available to downstream blocks. The execution engine handles all file transfer and format conversion.
|
||||
Files are automatically available to downstream blocks. The engine handles all file transfer and format conversion.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -165,15 +165,15 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
|
||||
2. **Check file types** - Ensure the file type matches what the receiving block expects. The Vision block needs images, the File block handles documents.
|
||||
|
||||
3. **Consider file size** - Large files increase execution time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
3. **Consider file size** - Large files increase run time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during workflow execution is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during a workflow run is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What file input formats are supported via the API?", answer: "When triggering a workflow via API, you can send files as base64-encoded data (using a data URI with the format 'data:{mime};base64,{data}') or as a URL pointing to a publicly accessible file. In both cases, include the file name and MIME type in the request." },
|
||||
{ question: "How are files passed between blocks internally?", answer: "Files are represented as standardized UserFile objects with name, url, base64, type, and size properties. Most blocks accept the full file object and extract what they need automatically, so you typically pass the entire object rather than individual properties." },
|
||||
{ question: "Which blocks can output files?", answer: "Gmail outputs email attachments, Slack outputs downloaded files, TTS generates audio files, Video Generator and Image Generator produce media files. Storage blocks like S3, Supabase, Google Drive, and Dropbox can also retrieve files for use in downstream blocks." },
|
||||
{ question: "Do I need to extract base64 or URL from file objects manually?", answer: "No. Most blocks accept the full file object and handle the format conversion automatically. Simply pass the entire file reference (e.g., <gmail.attachments[0]>) and the receiving block will extract the data it needs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the execution engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
]} />
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows are executed in Sim.
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully. This section explains exactly how workflows run in Sim.
|
||||
|
||||
<Callout type="info">
|
||||
Every workflow execution follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
Every workflow run follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
</Callout>
|
||||
|
||||
## Documentation Overview
|
||||
@@ -22,33 +22,33 @@ Sim's execution engine brings your workflows to life by processing blocks in the
|
||||
</Card>
|
||||
|
||||
<Card title="Logging" href="/execution/logging">
|
||||
Monitor workflow executions with comprehensive logging and real-time visibility
|
||||
Monitor workflow runs with comprehensive logging and real-time visibility
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="Cost Calculation" href="/execution/costs">
|
||||
Understand how workflow execution costs are calculated and optimized
|
||||
Understand how workflow run costs are calculated and optimized
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="External API" href="/execution/api">
|
||||
Access execution logs and set up webhooks programmatically via REST API
|
||||
Access run logs and set up webhooks programmatically via REST API
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Topological Execution
|
||||
Blocks execute in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
Blocks run in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
|
||||
### Path Tracking
|
||||
The engine actively tracks execution paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks execute.
|
||||
The engine actively tracks run paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks run.
|
||||
|
||||
### Layer-Based Processing
|
||||
Instead of executing blocks one-by-one, the engine identifies layers of blocks that can run in parallel, optimizing performance for complex workflows.
|
||||
|
||||
### Execution Context
|
||||
Each workflow maintains a rich context during execution containing:
|
||||
### Run Context
|
||||
Each workflow maintains a rich context during a run containing:
|
||||
- Block outputs and states
|
||||
- Active execution paths
|
||||
- Active run paths
|
||||
- Loop and parallel iteration tracking
|
||||
- Environment variables
|
||||
- Routing decisions
|
||||
@@ -56,7 +56,7 @@ Each workflow maintains a rich context during execution containing:
|
||||
|
||||
## Deployment Snapshots
|
||||
|
||||
API, Chat, Schedule, and Webhook executions run against the workflow’s active deployment snapshot. Manual runs from the editor execute the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
API, Chat, Schedule, and Webhook runs use the workflow’s active deployment snapshot. Manual runs from the editor use the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
|
||||
<div className='flex justify-center my-6'>
|
||||
<Image
|
||||
@@ -70,9 +70,9 @@ API, Chat, Schedule, and Webhook executions run against the workflow’s active
|
||||
|
||||
The Deploy modal keeps a full version history—inspect any snapshot, compare it against your draft, and promote or roll back with one click when you need to restore a prior release.
|
||||
|
||||
## Programmatic Execution
|
||||
## Programmatic Access
|
||||
|
||||
Execute workflows from your applications using our official SDKs:
|
||||
Run workflows from your applications using our official SDKs:
|
||||
|
||||
```bash
|
||||
# TypeScript/JavaScript
|
||||
@@ -107,21 +107,21 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- Use parallel execution for independent operations
|
||||
- Cache results with Memory blocks when appropriate
|
||||
|
||||
### Monitor Executions
|
||||
### Monitor Runs
|
||||
- Review logs regularly to understand performance patterns
|
||||
- Track costs for AI model usage
|
||||
- Use workflow snapshots to debug issues
|
||||
|
||||
## What's Next?
|
||||
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your executions and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your runs and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What are the execution timeout limits?", answer: "Synchronous executions (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous executions (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "What are the run timeout limits?", answer: "Synchronous runs (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous runs (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "How does parallel execution work?", answer: "The engine identifies layers of blocks with no dependencies on each other and runs them concurrently. Within loops and parallel blocks, the engine supports up to 20 parallel branches by default and up to 1,000 loop iterations. Nested subflows (loops inside parallels, or vice versa) are supported up to 10 levels deep." },
|
||||
{ question: "Can I cancel a running execution?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel an execution, the engine checks for cancellation between block executions (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the execution returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based executions (API, chat, schedule, webhook) run against the active snapshot, not your draft canvas. Manual runs from the editor execute the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are execution costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that execution. You can review costs in the execution logs." },
|
||||
{ question: "What happens when a block fails during execution?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the execution with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-execute from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-executes." },
|
||||
{ question: "Can I cancel a running workflow?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel a run, the engine checks for cancellation between blocks (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the run returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based runs (API, chat, schedule, webhook) use the active snapshot, not your draft canvas. Manual runs from the editor use the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are run costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that run. You can review costs in the run logs." },
|
||||
{ question: "What happens when a block fails during a run?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the run with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-run from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-runs." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim provides comprehensive logging for all workflow executions, giving you complete visibility into how your workflows run, what data flows through them, and where issues might occur.
|
||||
Sim provides comprehensive logging for all workflow runs, giving you complete visibility into how your workflows behave, what data flows through them, and where issues might occur.
|
||||
|
||||
## Logging System
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim offers two complementary logging interfaces to match different workflows and
|
||||
|
||||
### Real-Time Console
|
||||
|
||||
During manual or chat workflow execution, logs appear in real-time in the Console panel on the right side of the workflow editor:
|
||||
During manual or chat workflow runs, logs appear in real time in the Console panel on the right side of the workflow editor:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -27,14 +27,14 @@ During manual or chat workflow execution, logs appear in real-time in the Consol
|
||||
</div>
|
||||
|
||||
The console shows:
|
||||
- Block execution progress with active block highlighting
|
||||
- Block progress with active block highlighting
|
||||
- Real-time outputs as blocks complete
|
||||
- Execution timing for each block
|
||||
- Timing for each block
|
||||
- Success/error status indicators
|
||||
|
||||
### Logs Page
|
||||
|
||||
All workflow executions—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
All workflow runs—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -72,7 +72,7 @@ View the complete data flow for each block with tabs to switch between:
|
||||
|
||||
<Tabs items={['Output', 'Input']}>
|
||||
<Tab>
|
||||
**Output Tab** shows the block's execution result:
|
||||
**Output Tab** shows the block's result:
|
||||
- Structured data with JSON formatting
|
||||
- Markdown rendering for AI-generated content
|
||||
- Copy button for easy data extraction
|
||||
@@ -87,17 +87,17 @@ View the complete data flow for each block with tabs to switch between:
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Execution Timeline
|
||||
### Run Timeline
|
||||
|
||||
For workflow-level logs, view detailed execution metrics:
|
||||
For workflow-level logs, view detailed run metrics:
|
||||
- Start and end timestamps
|
||||
- Total workflow duration
|
||||
- Individual block execution times
|
||||
- Individual block run times
|
||||
- Performance bottleneck identification
|
||||
|
||||
## Workflow Snapshots
|
||||
|
||||
For any logged execution, click "View Snapshot" to see the exact workflow state at execution time:
|
||||
For any logged run, click "View Snapshot" to see the exact workflow state at the time of the run:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -111,12 +111,12 @@ For any logged execution, click "View Snapshot" to see the exact workflow state
|
||||
|
||||
The snapshot provides:
|
||||
- Frozen canvas showing the workflow structure
|
||||
- Block states and connections as they were during execution
|
||||
- Block states and connections as they were during the run
|
||||
- Click any block to see its inputs and outputs
|
||||
- Useful for debugging workflows that have since been modified
|
||||
|
||||
<Callout type="info">
|
||||
Workflow snapshots are only available for executions after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
Workflow snapshots are only available for runs after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
</Callout>
|
||||
|
||||
## Log Retention
|
||||
@@ -134,11 +134,11 @@ The snapshot provides:
|
||||
### For Production
|
||||
- Monitor the Logs page regularly for errors or performance issues
|
||||
- Set up filters to focus on specific workflows or time periods
|
||||
- Use live mode during critical deployments to watch executions in real-time
|
||||
- Use live mode during critical deployments to watch runs in real time
|
||||
|
||||
### For Debugging
|
||||
- Always check the execution timeline to identify slow blocks
|
||||
- Compare inputs between working and failing executions
|
||||
- Always check the run timeline to identify slow blocks
|
||||
- Compare inputs between working and failing runs
|
||||
- Use workflow snapshots to see the exact state when issues occurred
|
||||
|
||||
## Next Steps
|
||||
@@ -150,10 +150,10 @@ The snapshot provides:
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How long are execution logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each execution log?", answer: "Each log entry includes the execution ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), execution data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "How long are run logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each run log?", answer: "Each log entry includes the run ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), run data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "Are API keys visible in the logs?", answer: "No. API keys and credentials are automatically redacted in the log input tab for security. You can safely inspect block inputs without exposing sensitive values." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at execution time. It lets you see the exact state of the workflow when a particular execution ran, which is useful for debugging workflows that have been modified since the execution." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when executions complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new execution entries appear as they are logged, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at the time of a run. It lets you see the exact state of the workflow when a particular run happened, which is useful for debugging workflows that have been modified since." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when runs complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real time so new log entries appear as they are recorded, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
]} />
|
||||
@@ -170,17 +170,17 @@ Build, test, and refine workflows quickly with immediate feedback
|
||||
## Next Steps
|
||||
|
||||
<Cards>
|
||||
<Card title="Explore Workflow Blocks" href="/blocks">
|
||||
Discover API, Function, Condition, and other workflow blocks
|
||||
<Card title="Explore Blocks" href="/blocks">
|
||||
Discover API, Function, Condition, and other blocks
|
||||
</Card>
|
||||
<Card title="Browse Integrations" href="/tools">
|
||||
Connect 160+ services including Gmail, Slack, Notion, and more
|
||||
Connect 1,000+ services including Gmail, Slack, Notion, and more
|
||||
</Card>
|
||||
<Card title="Add Custom Logic" href="/blocks/function">
|
||||
Write custom functions for advanced data processing
|
||||
</Card>
|
||||
<Card title="Deploy Your Workflow" href="/execution">
|
||||
Make your workflow accessible via REST API or webhooks
|
||||
<Card title="Deploy Your Agent" href="/execution">
|
||||
Make your agent accessible via REST API or webhooks
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
@@ -188,7 +188,7 @@ Build, test, and refine workflows quickly with immediate feedback
|
||||
|
||||
**Need detailed explanations?** Visit the [Blocks documentation](/blocks) for comprehensive guides on each component.
|
||||
|
||||
**Looking for integrations?** Explore the [Tools documentation](/tools) to see all 160+ available integrations.
|
||||
**Looking for integrations?** Explore the [Tools documentation](/tools) to see all 1,000+ available integrations.
|
||||
|
||||
**Ready to go live?** Learn about [Execution and Deployment](/execution) to make your workflows production-ready.
|
||||
|
||||
@@ -199,5 +199,5 @@ Build, test, and refine workflows quickly with immediate feedback
|
||||
{ question: "Can I use a different AI model instead of GPT-4o?", answer: "Yes. The Agent block supports models from OpenAI, Anthropic, Google, Groq, Cerebras, DeepSeek, Mistral, xAI, and more. You can select any available model from the dropdown. If you self-host, you can also use local models through Ollama." },
|
||||
{ question: "Can I import workflows from other tools?", answer: "Sim does not currently support importing workflows from other automation platforms. However, you can use the Copilot feature to describe what you want in natural language and have it build the workflow for you, which is often faster than manual recreation." },
|
||||
{ question: "What if my workflow does not produce the expected output?", answer: "Use the Chat panel to test iteratively and inspect outputs from each block. You can click the dropdown to view different block outputs and pinpoint where the issue is. The execution logs (accessible from the Logs tab) show detailed information about each step including token usage, costs, and any errors." },
|
||||
{ question: "Where do I go after completing this tutorial?", answer: "Explore the Blocks documentation to learn about Condition, Router, Function, and API blocks. Browse the Tools section to discover 160+ integrations you can add to your agents. When you are ready to deploy, check the Execution docs for REST API, webhook, and scheduled trigger options." },
|
||||
{ question: "Where do I go after completing this tutorial?", answer: "Explore the Blocks documentation to learn about Condition, Router, Function, and API blocks. Browse the Tools section to discover 1,000+ integrations you can add to your agents. When you are ready to deploy, check the Execution docs for REST API, webhook, and scheduled trigger options." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
|
||||
# Sim Documentation
|
||||
|
||||
Welcome to Sim, a visual workflow builder for AI applications. Build powerful AI agents, automation workflows, and data processing pipelines by connecting blocks on a canvas.
|
||||
Welcome to Sim, the open-source AI workspace where teams build, deploy, and manage AI agents. Create agents visually with the workflow builder, conversationally through Mothership, or programmatically with the API — all connected to 1,000+ integrations and every major LLM.
|
||||
|
||||
## Quick Start
|
||||
|
||||
@@ -15,13 +15,13 @@ Welcome to Sim, a visual workflow builder for AI applications. Build powerful AI
|
||||
Learn what you can build with Sim
|
||||
</Card>
|
||||
<Card title="Getting Started" href="/getting-started">
|
||||
Create your first workflow in 10 minutes
|
||||
Build your first agent in 10 minutes
|
||||
</Card>
|
||||
<Card title="Workflow Blocks" href="/blocks">
|
||||
<Card title="Blocks" href="/blocks">
|
||||
Learn about the building blocks
|
||||
</Card>
|
||||
<Card title="Tools & Integrations" href="/tools">
|
||||
Explore 80+ built-in integrations
|
||||
Explore 1,000+ integrations
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
@@ -35,10 +35,10 @@ Welcome to Sim, a visual workflow builder for AI applications. Build powerful AI
|
||||
Work with workflow and environment variables
|
||||
</Card>
|
||||
<Card title="Execution" href="/execution">
|
||||
Monitor workflow runs and manage costs
|
||||
Monitor agent runs and manage costs
|
||||
</Card>
|
||||
<Card title="Triggers" href="/triggers">
|
||||
Start workflows via API, webhooks, or schedules
|
||||
Start agents via API, webhooks, or schedules
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
|
||||
@@ -8,7 +8,7 @@ import { Image } from '@/components/ui/image'
|
||||
import { Video } from '@/components/ui/video'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Sim is an open-source visual workflow builder for building and deploying AI agent workflows. Design intelligent automation systems using a no-code interface—connect AI models, databases, APIs, and business tools through an intuitive drag-and-drop canvas. Whether you're building chatbots, automating business processes, or orchestrating complex data pipelines, Sim provides the tools to bring your AI workflows to life.
|
||||
Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Create agents visually with the workflow builder, conversationally through Mothership, or programmatically with the API. Connect AI models, databases, APIs, and 1,000+ business tools to build agents that automate real work — from chatbots and compliance agents to data pipelines and ITSM automation.
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -40,8 +40,8 @@ Orchestrate complex multi-service interactions. Create unified API endpoints, im
|
||||
|
||||
## How It Works
|
||||
|
||||
**Visual Workflow Editor**
|
||||
Design workflows using an intuitive drag-and-drop canvas. Connect AI models, databases, APIs, and third-party services through a visual, no-code interface that makes complex automation logic easy to understand and maintain.
|
||||
**Visual Workflow Builder**
|
||||
Design agent logic using an intuitive drag-and-drop canvas. Connect AI models, databases, APIs, and third-party services through a visual interface that makes complex automation easy to understand and maintain.
|
||||
|
||||
**Modular Block System**
|
||||
Build with specialized components: processing blocks (AI agents, API calls, custom functions), logic blocks (conditional branching, loops, routers), and output blocks (responses, evaluators). Each block handles a specific task in your workflow.
|
||||
@@ -58,7 +58,7 @@ Enable your team to build together. Multiple users can edit workflows simultaneo
|
||||
|
||||
## Integrations
|
||||
|
||||
Sim provides native integrations with 160+ services across multiple categories:
|
||||
Sim provides native integrations with 1,000+ services across multiple categories:
|
||||
|
||||
- **AI Models**: OpenAI, Anthropic, Google Gemini, Groq, Cerebras, local models via Ollama or VLLM
|
||||
- **Communication**: Gmail, Slack, Microsoft Teams, Telegram, WhatsApp
|
||||
@@ -100,17 +100,17 @@ Deploy on your own infrastructure using Docker Compose or Kubernetes. Maintain c
|
||||
|
||||
## Next Steps
|
||||
|
||||
Ready to build your first AI workflow?
|
||||
Ready to build your first AI agent?
|
||||
|
||||
<Cards>
|
||||
<Card title="Getting Started" href="/getting-started">
|
||||
Create your first workflow in 10 minutes
|
||||
Build your first agent in 10 minutes
|
||||
</Card>
|
||||
<Card title="Workflow Blocks" href="/blocks">
|
||||
<Card title="Blocks" href="/blocks">
|
||||
Learn about the building blocks
|
||||
</Card>
|
||||
<Card title="Tools & Integrations" href="/tools">
|
||||
Explore 160+ built-in integrations
|
||||
Explore 1,000+ integrations
|
||||
</Card>
|
||||
<Card title="Team Permissions" href="/permissions/roles-and-permissions">
|
||||
Set up workspace roles and permissions
|
||||
@@ -121,9 +121,9 @@ Ready to build your first AI workflow?
|
||||
{ question: "Is Sim free to use?", answer: "Sim offers a free Community plan with 1,000 one-time credits to get started. Paid plans start at $25/month (Pro) with 5,000 credits and go up to $100/month (Max) with 20,000 credits. Annual billing is available at a 15% discount. You can also self-host Sim for free on your own infrastructure." },
|
||||
{ question: "Is Sim open source?", answer: "Yes. Sim is open source under the Apache 2.0 license. The full source code is available on GitHub and you can self-host it, contribute to development, or modify it for your own needs. Enterprise features (SSO, access control) have a separate license that requires a subscription for production use." },
|
||||
{ question: "Which AI models and providers are supported?", answer: "Sim supports 15+ providers including OpenAI, Anthropic, Google Gemini, Groq, Cerebras, DeepSeek, Mistral, xAI, and OpenRouter. You can also run local models through Ollama or VLLM at no API cost. Bring Your Own Key (BYOK) is supported so you can use your own API keys at base provider pricing with no markup." },
|
||||
{ question: "Do I need coding experience to use Sim?", answer: "No. Sim is a no-code visual builder where you design workflows by dragging blocks onto a canvas and connecting them. For advanced use cases, the Function block lets you write custom JavaScript, but it is entirely optional." },
|
||||
{ question: "Do I need coding experience to use Sim?", answer: "No. Sim lets you build agents visually by dragging blocks onto a canvas and connecting them, or conversationally through Mothership using natural language. For advanced use cases, the Function block lets you write custom JavaScript, and the full API/SDK is available for programmatic access." },
|
||||
{ question: "Can I self-host Sim?", answer: "Yes. Sim provides Docker Compose configurations for self-hosted deployments. The stack includes the Sim application, a PostgreSQL database with pgvector, and a realtime collaboration server. You can also integrate local AI models via Ollama for a fully offline setup." },
|
||||
{ question: "Is there a limit on how many workflows I can create?", answer: "There is no limit on the number of workflows you can create on any plan. Usage limits apply to execution credits, rate limits, and file storage, which vary by plan tier." },
|
||||
{ question: "What integrations are available?", answer: "Sim offers 160+ native integrations across categories including AI models, communication tools (Gmail, Slack, Teams, Telegram), productivity apps (Notion, Google Workspace, Airtable), development tools (GitHub, Jira, Linear), search services (Google Search, Perplexity, Exa), and databases (PostgreSQL, Supabase, Pinecone). For anything not built in, you can use the MCP (Model Context Protocol) support to connect custom services." },
|
||||
{ question: "How does Sim compare to other workflow automation tools?", answer: "Sim is purpose-built for AI agent workflows rather than general task automation. It provides a visual canvas for orchestrating LLM-powered agents with built-in support for tool use, structured outputs, conditional branching, and real-time collaboration. The Copilot feature also lets you build and modify workflows using natural language." },
|
||||
{ question: "What integrations are available?", answer: "Sim offers 1,000+ native integrations across categories including AI models, communication tools (Gmail, Slack, Teams, Telegram), productivity apps (Notion, Google Workspace, Airtable), development tools (GitHub, Jira, Linear), search services (Google Search, Perplexity, Exa), and databases (PostgreSQL, Supabase, Pinecone). For anything not built in, you can use the MCP (Model Context Protocol) support to connect custom services." },
|
||||
{ question: "How does Sim compare to other AI agent builders?", answer: "Sim is an AI workspace — not just a workflow tool or an agent framework. It combines a visual workflow builder, Mothership for natural-language agent creation, knowledge bases, tables, and full observability in one environment. Teams build agents visually, conversationally, or with code, then deploy and manage them with enterprise governance, real-time collaboration, and staging-to-production workflows." },
|
||||
]} />
|
||||
|
||||
332
apps/docs/content/docs/en/tools/agiloft.mdx
Normal file
332
apps/docs/content/docs/en/tools/agiloft.mdx
Normal file
@@ -0,0 +1,332 @@
|
||||
---
|
||||
title: Agiloft
|
||||
description: Manage records in Agiloft CLM
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="agiloft"
|
||||
color="#263A5C"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[Agiloft](https://www.agiloft.com/) is an enterprise contract lifecycle management (CLM) platform that helps organizations automate and manage contracts, agreements, and related business processes across any knowledge base.
|
||||
|
||||
With the Agiloft integration in Sim, you can:
|
||||
|
||||
- **Create records**: Add new records to any Agiloft table with custom field values
|
||||
- **Read records**: Retrieve individual records by ID with optional field selection
|
||||
- **Update records**: Modify existing record fields in any table
|
||||
- **Delete records**: Remove records from your knowledge base
|
||||
- **Search records**: Find records using Agiloft's query syntax with pagination support
|
||||
- **Select records**: Query records using SQL WHERE clauses for advanced filtering
|
||||
- **Saved searches**: List saved search definitions available for a table
|
||||
- **Attach files**: Upload and attach files to record fields
|
||||
- **Retrieve attachments**: Download attached files from record fields
|
||||
- **Remove attachments**: Delete attached files from record fields by position
|
||||
- **Attachment info**: Get metadata about all files attached to a record field
|
||||
- **Lock records**: Check, acquire, or release locks on records for concurrent editing
|
||||
|
||||
In Sim, the Agiloft integration enables your agents to manage contracts and records programmatically as part of automated workflows. Agents can create and update records, search across tables, handle file attachments, and manage record locks — enabling intelligent contract lifecycle automation.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate with Agiloft contract lifecycle management to create, read, update, delete, and search records. Supports file attachments, SQL-based selection, saved searches, and record locking across any table in your knowledge base.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `agiloft_attach_file`
|
||||
|
||||
Attach a file to a field in an Agiloft record.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts"\) |
|
||||
| `recordId` | string | Yes | ID of the record to attach the file to |
|
||||
| `fieldName` | string | Yes | Name of the attachment field |
|
||||
| `file` | file | No | File to attach |
|
||||
| `fileName` | string | No | Name to assign to the file \(defaults to original file name\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `recordId` | string | ID of the record the file was attached to |
|
||||
| `fieldName` | string | Name of the field the file was attached to |
|
||||
| `fileName` | string | Name of the attached file |
|
||||
| `totalAttachments` | number | Total number of files attached in the field after the operation |
|
||||
|
||||
### `agiloft_attachment_info`
|
||||
|
||||
Get information about file attachments on a record field.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts"\) |
|
||||
| `recordId` | string | Yes | ID of the record to check attachments on |
|
||||
| `fieldName` | string | Yes | Name of the attachment field to inspect |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `attachments` | array | List of attachments with position, name, and size |
|
||||
| ↳ `position` | number | Position index of the attachment in the field |
|
||||
| ↳ `name` | string | File name of the attachment |
|
||||
| ↳ `size` | number | File size in bytes |
|
||||
| `totalCount` | number | Total number of attachments in the field |
|
||||
|
||||
### `agiloft_create_record`
|
||||
|
||||
Create a new record in an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `data` | string | Yes | Record field values as a JSON object \(e.g., \{"first_name": "John", "status": "Active"\}\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | ID of the created record |
|
||||
| `fields` | json | Field values of the created record |
|
||||
|
||||
### `agiloft_delete_record`
|
||||
|
||||
Delete a record from an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `recordId` | string | Yes | ID of the record to delete |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | ID of the deleted record |
|
||||
| `deleted` | boolean | Whether the record was successfully deleted |
|
||||
|
||||
### `agiloft_lock_record`
|
||||
|
||||
Lock, unlock, or check the lock status of an Agiloft record.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts"\) |
|
||||
| `recordId` | string | Yes | ID of the record to lock, unlock, or check |
|
||||
| `lockAction` | string | Yes | Action to perform: "lock", "unlock", or "check" |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | Record ID |
|
||||
| `lockStatus` | string | Lock status \(e.g., "LOCKED", "UNLOCKED"\) |
|
||||
| `lockedBy` | string | Username of the user who locked the record |
|
||||
| `lockExpiresInMinutes` | number | Minutes until the lock expires |
|
||||
|
||||
### `agiloft_read_record`
|
||||
|
||||
Read a record by ID from an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `recordId` | string | Yes | ID of the record to read |
|
||||
| `fields` | string | No | Comma-separated list of field names to include in the response |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | ID of the record |
|
||||
| `fields` | json | Field values of the record |
|
||||
|
||||
### `agiloft_remove_attachment`
|
||||
|
||||
Remove an attached file from a field in an Agiloft record.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts"\) |
|
||||
| `recordId` | string | Yes | ID of the record containing the attachment |
|
||||
| `fieldName` | string | Yes | Name of the attachment field |
|
||||
| `position` | string | Yes | Position index of the file to remove \(starting from 0\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `recordId` | string | ID of the record |
|
||||
| `fieldName` | string | Name of the attachment field |
|
||||
| `remainingAttachments` | number | Number of attachments remaining in the field after removal |
|
||||
|
||||
### `agiloft_retrieve_attachment`
|
||||
|
||||
Download an attached file from an Agiloft record field.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts"\) |
|
||||
| `recordId` | string | Yes | ID of the record containing the attachment |
|
||||
| `fieldName` | string | Yes | Name of the attachment field |
|
||||
| `position` | string | Yes | Position index of the file in the field \(starting from 0\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `file` | file | Downloaded attachment file |
|
||||
|
||||
### `agiloft_saved_search`
|
||||
|
||||
List saved searches defined for an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name to list saved searches for \(e.g., "contracts"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `searches` | array | List of saved searches for the table |
|
||||
| ↳ `name` | string | Saved search name |
|
||||
| ↳ `label` | string | Saved search display label |
|
||||
| ↳ `id` | string | Saved search database identifier |
|
||||
| ↳ `description` | string | Saved search description |
|
||||
|
||||
### `agiloft_search_records`
|
||||
|
||||
Search for records in an Agiloft table using a query.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name to search in \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `query` | string | Yes | Search query using Agiloft query syntax \(e.g., "status=\'Active\'" or "company_name~=\'Acme\'"\) |
|
||||
| `fields` | string | No | Comma-separated list of field names to include in the results |
|
||||
| `page` | string | No | Page number for paginated results \(starting from 0\) |
|
||||
| `limit` | string | No | Maximum number of records to return per page |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `records` | json | Array of matching records with their field values |
|
||||
| `totalCount` | number | Total number of matching records |
|
||||
| `page` | number | Current page number |
|
||||
| `limit` | number | Records per page |
|
||||
|
||||
### `agiloft_select_records`
|
||||
|
||||
Select record IDs matching a SQL WHERE clause from an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `where` | string | Yes | SQL WHERE clause using database column names \(e.g., "summary like \'%new%\'" or "assigned_person=\'John Doe\'"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `recordIds` | array | Array of record IDs matching the query |
|
||||
| `totalCount` | number | Total number of matching records |
|
||||
|
||||
### `agiloft_update_record`
|
||||
|
||||
Update an existing record in an Agiloft table.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `instanceUrl` | string | Yes | Agiloft instance URL \(e.g., https://mycompany.agiloft.com\) |
|
||||
| `knowledgeBase` | string | Yes | Knowledge base name |
|
||||
| `login` | string | Yes | Agiloft username |
|
||||
| `password` | string | Yes | Agiloft password |
|
||||
| `table` | string | Yes | Table name \(e.g., "contracts", "contacts.employees"\) |
|
||||
| `recordId` | string | Yes | ID of the record to update |
|
||||
| `data` | string | Yes | Updated field values as a JSON object \(e.g., \{"status": "Active", "priority": "High"\}\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | ID of the updated record |
|
||||
| `fields` | json | Updated field values of the record |
|
||||
|
||||
|
||||
@@ -113,7 +113,7 @@ Retrieve the results of a completed Athena query execution
|
||||
| `awsAccessKeyId` | string | Yes | AWS access key ID |
|
||||
| `awsSecretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `queryExecutionId` | string | Yes | Query execution ID to get results for |
|
||||
| `maxResults` | number | No | Maximum number of rows to return \(1-1000\) |
|
||||
| `maxResults` | number | No | Maximum number of rows to return \(1-999\) |
|
||||
| `nextToken` | string | No | Pagination token from a previous request |
|
||||
|
||||
#### Output
|
||||
|
||||
201
apps/docs/content/docs/en/tools/brightdata.mdx
Normal file
201
apps/docs/content/docs/en/tools/brightdata.mdx
Normal file
@@ -0,0 +1,201 @@
|
||||
---
|
||||
title: Bright Data
|
||||
description: Scrape websites and search engines, and extract structured data
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="brightdata"
|
||||
color="#FFFFFF"
|
||||
/>
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate Bright Data into the workflow. Scrape any URL with Web Unlocker, search Google and other engines with SERP API, discover web content ranked by intent, or trigger pre-built scrapers for structured data extraction.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `brightdata_scrape_url`
|
||||
|
||||
Fetch content from any URL using Bright Data Web Unlocker. Bypasses anti-bot protections, CAPTCHAs, and IP blocks automatically.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `zone` | string | Yes | Web Unlocker zone name from your Bright Data dashboard \(e.g., "web_unlocker1"\) |
|
||||
| `url` | string | Yes | The URL to scrape \(e.g., "https://example.com/page"\) |
|
||||
| `format` | string | No | Response format: "raw" for HTML or "json" for parsed content. Defaults to "raw" |
|
||||
| `country` | string | No | Two-letter country code for geo-targeting \(e.g., "us", "gb"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | The scraped page content \(HTML or JSON depending on format\) |
|
||||
| `url` | string | The URL that was scraped |
|
||||
| `statusCode` | number | HTTP status code of the response |
|
||||
|
||||
### `brightdata_serp_search`
|
||||
|
||||
Search Google, Bing, DuckDuckGo, or Yandex and get structured search results using Bright Data SERP API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `zone` | string | Yes | SERP API zone name from your Bright Data dashboard \(e.g., "serp_api1"\) |
|
||||
| `query` | string | Yes | The search query \(e.g., "best project management tools"\) |
|
||||
| `searchEngine` | string | No | Search engine to use: "google", "bing", "duckduckgo", or "yandex". Defaults to "google" |
|
||||
| `country` | string | No | Two-letter country code for localized results \(e.g., "us", "gb"\) |
|
||||
| `language` | string | No | Two-letter language code \(e.g., "en", "es"\) |
|
||||
| `numResults` | number | No | Number of results to return \(e.g., 10, 20\). Defaults to 10 |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | array | Array of search results |
|
||||
| ↳ `title` | string | Title of the search result |
|
||||
| ↳ `url` | string | URL of the search result |
|
||||
| ↳ `description` | string | Snippet or description of the result |
|
||||
| ↳ `rank` | number | Position in search results |
|
||||
| `query` | string | The search query that was executed |
|
||||
| `searchEngine` | string | The search engine that was used |
|
||||
|
||||
### `brightdata_discover`
|
||||
|
||||
AI-powered web discovery that finds and ranks results by intent. Returns up to 1,000 results with optional cleaned page content for RAG and verification.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `query` | string | Yes | The search query \(e.g., "competitor pricing changes enterprise plan"\) |
|
||||
| `numResults` | number | No | Number of results to return, up to 1000. Defaults to 10 |
|
||||
| `intent` | string | No | Describes what the agent is trying to accomplish, used to rank results by relevance \(e.g., "find official pricing pages and change notes"\) |
|
||||
| `includeContent` | boolean | No | Whether to include cleaned page content in results |
|
||||
| `format` | string | No | Response format: "json" or "markdown". Defaults to "json" |
|
||||
| `language` | string | No | Search language code \(e.g., "en", "es", "fr"\). Defaults to "en" |
|
||||
| `country` | string | No | Two-letter ISO country code for localized results \(e.g., "us", "gb"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | array | Array of discovered web results ranked by intent relevance |
|
||||
| ↳ `url` | string | URL of the discovered page |
|
||||
| ↳ `title` | string | Page title |
|
||||
| ↳ `description` | string | Page description or snippet |
|
||||
| ↳ `relevanceScore` | number | AI-calculated relevance score for intent-based ranking |
|
||||
| ↳ `content` | string | Cleaned page content in the requested format \(when includeContent is true\) |
|
||||
| `query` | string | The search query that was executed |
|
||||
| `totalResults` | number | Total number of results returned |
|
||||
|
||||
### `brightdata_sync_scrape`
|
||||
|
||||
Scrape URLs synchronously using a Bright Data pre-built scraper and get structured results directly. Supports up to 20 URLs with a 1-minute timeout.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `datasetId` | string | Yes | Dataset scraper ID from your Bright Data dashboard \(e.g., "gd_l1viktl72bvl7bjuj0"\) |
|
||||
| `urls` | string | Yes | JSON array of URL objects to scrape, up to 20 \(e.g., \[\{"url": "https://example.com/product"\}\]\) |
|
||||
| `format` | string | No | Output format: "json", "ndjson", or "csv". Defaults to "json" |
|
||||
| `includeErrors` | boolean | No | Whether to include error reports in results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | array | Array of scraped result objects with fields specific to the dataset scraper used |
|
||||
| `snapshotId` | string | Snapshot ID returned if the request exceeded the 1-minute timeout and switched to async processing |
|
||||
| `isAsync` | boolean | Whether the request fell back to async mode \(true means use snapshot ID to retrieve results\) |
|
||||
|
||||
### `brightdata_scrape_dataset`
|
||||
|
||||
Trigger a Bright Data pre-built scraper to extract structured data from URLs. Supports 660+ scrapers for platforms like Amazon, LinkedIn, Instagram, and more.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `datasetId` | string | Yes | Dataset scraper ID from your Bright Data dashboard \(e.g., "gd_l1viktl72bvl7bjuj0"\) |
|
||||
| `urls` | string | Yes | JSON array of URL objects to scrape \(e.g., \[\{"url": "https://example.com/product"\}\]\) |
|
||||
| `format` | string | No | Output format: "json" or "csv". Defaults to "json" |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID to retrieve results later |
|
||||
| `status` | string | Status of the scraping job \(e.g., "triggered", "running"\) |
|
||||
|
||||
### `brightdata_snapshot_status`
|
||||
|
||||
Check the progress of an async Bright Data scraping job. Returns status: starting, running, ready, or failed.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID returned when the collection was triggered \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID that was queried |
|
||||
| `datasetId` | string | The dataset ID associated with this snapshot |
|
||||
| `status` | string | Current status of the snapshot: "starting", "running", "ready", or "failed" |
|
||||
|
||||
### `brightdata_download_snapshot`
|
||||
|
||||
Download the results of a completed Bright Data scraping job using its snapshot ID. The snapshot must have ready status.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID returned when the collection was triggered \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
| `format` | string | No | Output format: "json", "ndjson", "jsonl", or "csv". Defaults to "json" |
|
||||
| `compress` | boolean | No | Whether to compress the results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | array | Array of scraped result records |
|
||||
| `format` | string | The content type of the downloaded data |
|
||||
| `snapshotId` | string | The snapshot ID that was downloaded |
|
||||
|
||||
### `brightdata_cancel_snapshot`
|
||||
|
||||
Cancel an active Bright Data scraping job using its snapshot ID. Terminates data collection in progress.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID of the collection to cancel \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID that was cancelled |
|
||||
| `cancelled` | boolean | Whether the cancellation was successful |
|
||||
|
||||
|
||||
@@ -10,6 +10,24 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
color="linear-gradient(45deg, #B0084D 0%, #FF4F8B 100%)"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[AWS CloudWatch](https://aws.amazon.com/cloudwatch/) is a monitoring and observability service that provides data and actionable insights for AWS resources, applications, and services. CloudWatch collects monitoring and operational data in the form of logs, metrics, and events, giving you a unified view of your AWS environment.
|
||||
|
||||
With the CloudWatch integration, you can:
|
||||
|
||||
- **Query Logs (Insights)**: Run CloudWatch Log Insights queries against one or more log groups to analyze log data with a powerful query language
|
||||
- **Describe Log Groups**: List available CloudWatch log groups in your account, optionally filtered by name prefix
|
||||
- **Get Log Events**: Retrieve log events from a specific log stream within a log group
|
||||
- **Describe Log Streams**: List log streams within a log group, ordered by last event time or filtered by name prefix
|
||||
- **List Metrics**: Browse available CloudWatch metrics, optionally filtered by namespace, metric name, or recent activity
|
||||
- **Get Metric Statistics**: Retrieve statistical data for a metric over a specified time range with configurable granularity
|
||||
- **Publish Metric**: Publish custom metric data points to CloudWatch for your own application monitoring
|
||||
- **Describe Alarms**: List and filter CloudWatch alarms by name prefix, state, or alarm type
|
||||
|
||||
In Sim, the CloudWatch integration enables your agents to monitor AWS infrastructure, analyze application logs, track custom metrics, and respond to alarm states as part of automated DevOps and SRE workflows. This is especially powerful when combined with other AWS integrations like CloudFormation and SNS for end-to-end infrastructure management.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate AWS CloudWatch into workflows. Run Log Insights queries, list log groups, retrieve log events, list and get metrics, and monitor alarms. Requires AWS access key and secret access key.
|
||||
@@ -155,6 +173,34 @@ Get statistics for a CloudWatch metric over a time range
|
||||
| `label` | string | Metric label |
|
||||
| `datapoints` | array | Datapoints with timestamp and statistics values |
|
||||
|
||||
### `cloudwatch_put_metric_data`
|
||||
|
||||
Publish a custom metric data point to CloudWatch
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `awsRegion` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `awsAccessKeyId` | string | Yes | AWS access key ID |
|
||||
| `awsSecretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `namespace` | string | Yes | Metric namespace \(e.g., Custom/MyApp\) |
|
||||
| `metricName` | string | Yes | Name of the metric |
|
||||
| `value` | number | Yes | Metric value to publish |
|
||||
| `unit` | string | No | Unit of the metric \(e.g., Count, Seconds, Bytes\) |
|
||||
| `dimensions` | string | No | JSON string of dimension name/value pairs |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `success` | boolean | Whether the metric was published successfully |
|
||||
| `namespace` | string | Metric namespace |
|
||||
| `metricName` | string | Metric name |
|
||||
| `value` | number | Published metric value |
|
||||
| `unit` | string | Metric unit |
|
||||
| `timestamp` | string | Timestamp when the metric was published |
|
||||
|
||||
### `cloudwatch_describe_alarms`
|
||||
|
||||
List and filter CloudWatch alarms
|
||||
|
||||
144
apps/docs/content/docs/en/tools/crowdstrike.mdx
Normal file
144
apps/docs/content/docs/en/tools/crowdstrike.mdx
Normal file
@@ -0,0 +1,144 @@
|
||||
---
|
||||
title: CrowdStrike
|
||||
description: Query CrowdStrike Identity Protection sensors and run documented sensor aggregate queries
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="crowdstrike"
|
||||
color="#E01F3D"
|
||||
/>
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate CrowdStrike Identity Protection into workflows to search sensors, fetch documented sensor details by device ID, and run documented sensor aggregate queries.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `crowdstrike_get_sensor_aggregates`
|
||||
|
||||
Get documented CrowdStrike Identity Protection sensor aggregates from a JSON aggregate query body
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `clientId` | string | Yes | CrowdStrike Falcon API client ID |
|
||||
| `clientSecret` | string | Yes | CrowdStrike Falcon API client secret |
|
||||
| `cloud` | string | Yes | CrowdStrike Falcon cloud region |
|
||||
| `aggregateQuery` | json | Yes | JSON aggregate query body documented by CrowdStrike for sensor aggregates |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `aggregates` | array | Aggregate result groups returned by CrowdStrike |
|
||||
| ↳ `buckets` | array | Buckets within the aggregate result |
|
||||
| ↳ `count` | number | Bucket document count |
|
||||
| ↳ `from` | number | Bucket lower bound |
|
||||
| ↳ `keyAsString` | string | String representation of the bucket key |
|
||||
| ↳ `label` | json | Bucket label object |
|
||||
| ↳ `stringFrom` | string | String lower bound |
|
||||
| ↳ `stringTo` | string | String upper bound |
|
||||
| ↳ `subAggregates` | json | Nested aggregate results for this bucket |
|
||||
| ↳ `to` | number | Bucket upper bound |
|
||||
| ↳ `value` | number | Bucket metric value |
|
||||
| ↳ `valueAsString` | string | String representation of the bucket value |
|
||||
| ↳ `docCountErrorUpperBound` | number | Upper bound for bucket count error |
|
||||
| ↳ `name` | string | Aggregate result name |
|
||||
| ↳ `sumOtherDocCount` | number | Document count not included in the returned buckets |
|
||||
| `count` | number | Number of aggregate result groups returned |
|
||||
|
||||
### `crowdstrike_get_sensor_details`
|
||||
|
||||
Get documented CrowdStrike Identity Protection sensor details for one or more device IDs
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `clientId` | string | Yes | CrowdStrike Falcon API client ID |
|
||||
| `clientSecret` | string | Yes | CrowdStrike Falcon API client secret |
|
||||
| `cloud` | string | Yes | CrowdStrike Falcon cloud region |
|
||||
| `ids` | json | Yes | JSON array of CrowdStrike sensor device IDs |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `sensors` | array | CrowdStrike identity sensor detail records |
|
||||
| ↳ `agentVersion` | string | Sensor agent version |
|
||||
| ↳ `cid` | string | CrowdStrike customer identifier |
|
||||
| ↳ `deviceId` | string | Sensor device identifier |
|
||||
| ↳ `heartbeatTime` | number | Last heartbeat timestamp |
|
||||
| ↳ `hostname` | string | Sensor hostname |
|
||||
| ↳ `idpPolicyId` | string | Assigned Identity Protection policy ID |
|
||||
| ↳ `idpPolicyName` | string | Assigned Identity Protection policy name |
|
||||
| ↳ `ipAddress` | string | Sensor local IP address |
|
||||
| ↳ `kerberosConfig` | string | Kerberos configuration status |
|
||||
| ↳ `ldapConfig` | string | LDAP configuration status |
|
||||
| ↳ `ldapsConfig` | string | LDAPS configuration status |
|
||||
| ↳ `machineDomain` | string | Machine domain |
|
||||
| ↳ `ntlmConfig` | string | NTLM configuration status |
|
||||
| ↳ `osVersion` | string | Operating system version |
|
||||
| ↳ `rdpToDcConfig` | string | RDP to domain controller configuration status |
|
||||
| ↳ `smbToDcConfig` | string | SMB to domain controller configuration status |
|
||||
| ↳ `status` | string | Sensor protection status |
|
||||
| ↳ `statusCauses` | array | Documented causes behind the current status |
|
||||
| ↳ `tiEnabled` | string | Threat intelligence enablement status |
|
||||
| `count` | number | Number of sensors returned |
|
||||
| `pagination` | json | Pagination metadata when returned by the underlying API |
|
||||
| ↳ `limit` | number | Page size used for the query |
|
||||
| ↳ `offset` | number | Offset returned by CrowdStrike |
|
||||
| ↳ `total` | number | Total records available |
|
||||
|
||||
### `crowdstrike_query_sensors`
|
||||
|
||||
Search CrowdStrike Identity Protection sensors by hostname, IP, or related fields
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `clientId` | string | Yes | CrowdStrike Falcon API client ID |
|
||||
| `clientSecret` | string | Yes | CrowdStrike Falcon API client secret |
|
||||
| `cloud` | string | Yes | CrowdStrike Falcon cloud region |
|
||||
| `filter` | string | No | Falcon Query Language filter for identity sensor search |
|
||||
| `limit` | number | No | Maximum number of sensor records to return |
|
||||
| `offset` | number | No | Pagination offset for the identity sensor query |
|
||||
| `sort` | string | No | Sort expression for identity sensor results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `sensors` | array | Matching CrowdStrike identity sensor records |
|
||||
| ↳ `agentVersion` | string | Sensor agent version |
|
||||
| ↳ `cid` | string | CrowdStrike customer identifier |
|
||||
| ↳ `deviceId` | string | Sensor device identifier |
|
||||
| ↳ `heartbeatTime` | number | Last heartbeat timestamp |
|
||||
| ↳ `hostname` | string | Sensor hostname |
|
||||
| ↳ `idpPolicyId` | string | Assigned Identity Protection policy ID |
|
||||
| ↳ `idpPolicyName` | string | Assigned Identity Protection policy name |
|
||||
| ↳ `ipAddress` | string | Sensor local IP address |
|
||||
| ↳ `kerberosConfig` | string | Kerberos configuration status |
|
||||
| ↳ `ldapConfig` | string | LDAP configuration status |
|
||||
| ↳ `ldapsConfig` | string | LDAPS configuration status |
|
||||
| ↳ `machineDomain` | string | Machine domain |
|
||||
| ↳ `ntlmConfig` | string | NTLM configuration status |
|
||||
| ↳ `osVersion` | string | Operating system version |
|
||||
| ↳ `rdpToDcConfig` | string | RDP to domain controller configuration status |
|
||||
| ↳ `smbToDcConfig` | string | SMB to domain controller configuration status |
|
||||
| ↳ `status` | string | Sensor protection status |
|
||||
| ↳ `statusCauses` | array | Documented causes behind the current status |
|
||||
| ↳ `tiEnabled` | string | Threat intelligence enablement status |
|
||||
| `count` | number | Number of sensors returned |
|
||||
| `pagination` | json | Pagination metadata \(limit, offset, total\) |
|
||||
| ↳ `limit` | number | Page size used for the query |
|
||||
| ↳ `offset` | number | Offset returned by CrowdStrike |
|
||||
| ↳ `total` | number | Total records available |
|
||||
|
||||
|
||||
443
apps/docs/content/docs/en/tools/iam.mdx
Normal file
443
apps/docs/content/docs/en/tools/iam.mdx
Normal file
@@ -0,0 +1,443 @@
|
||||
---
|
||||
title: AWS IAM
|
||||
description: Manage AWS IAM users, roles, policies, and groups
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="iam"
|
||||
color="linear-gradient(45deg, #BD0816 0%, #FF5252 100%)"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[AWS Identity and Access Management (IAM)](https://aws.amazon.com/iam/) is a web service that helps you securely control access to AWS resources. IAM lets you manage permissions that control which AWS resources users, groups, and roles can access.
|
||||
|
||||
With AWS IAM, you can:
|
||||
|
||||
- **Manage users**: Create and manage IAM users, assign them individual security credentials, and grant them permissions to access AWS services and resources
|
||||
- **Create roles**: Define IAM roles with specific permissions that can be assumed by users, services, or applications for temporary access
|
||||
- **Attach policies**: Assign managed policies to users and roles to define what actions they can perform on which resources
|
||||
- **Organize with groups**: Create IAM groups to manage permissions for collections of users, simplifying access management at scale
|
||||
- **Control access keys**: Generate and manage programmatic access key pairs for API and CLI access to AWS services
|
||||
|
||||
In Sim, the AWS IAM integration allows your workflows to automate identity management tasks such as provisioning new users, assigning roles and permissions, managing group memberships, and rotating access keys. This is particularly useful for onboarding automation, security compliance workflows, access reviews, and incident response — enabling your agents to manage AWS access control programmatically.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate AWS Identity and Access Management into your workflow. Create and manage users, roles, policies, groups, and access keys.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `iam_list_users`
|
||||
|
||||
List IAM users in your AWS account
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `pathPrefix` | string | No | Path prefix to filter users \(e.g., /division_abc/\) |
|
||||
| `maxItems` | number | No | Maximum number of users to return \(1-1000, default 100\) |
|
||||
| `marker` | string | No | Pagination marker from a previous request |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `users` | json | List of IAM users with userName, userId, arn, path, and dates |
|
||||
| `isTruncated` | boolean | Whether there are more results available |
|
||||
| `marker` | string | Pagination marker for the next page of results |
|
||||
| `count` | number | Number of users returned |
|
||||
|
||||
### `iam_get_user`
|
||||
|
||||
Get detailed information about an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `userName` | string | The name of the user |
|
||||
| `userId` | string | The unique ID of the user |
|
||||
| `arn` | string | The ARN of the user |
|
||||
| `path` | string | The path to the user |
|
||||
| `createDate` | string | Date the user was created |
|
||||
| `passwordLastUsed` | string | Date the password was last used |
|
||||
| `permissionsBoundaryArn` | string | ARN of the permissions boundary policy |
|
||||
| `tags` | json | Tags attached to the user \(key, value pairs\) |
|
||||
|
||||
### `iam_create_user`
|
||||
|
||||
Create a new IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | Name for the new IAM user \(1-64 characters\) |
|
||||
| `path` | string | No | Path for the user \(e.g., /division_abc/\), defaults to / |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
| `userName` | string | The name of the created user |
|
||||
| `userId` | string | The unique ID of the created user |
|
||||
| `arn` | string | The ARN of the created user |
|
||||
| `path` | string | The path of the created user |
|
||||
| `createDate` | string | Date the user was created |
|
||||
|
||||
### `iam_delete_user`
|
||||
|
||||
Delete an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user to delete |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_list_roles`
|
||||
|
||||
List IAM roles in your AWS account
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `pathPrefix` | string | No | Path prefix to filter roles \(e.g., /application/\) |
|
||||
| `maxItems` | number | No | Maximum number of roles to return \(1-1000, default 100\) |
|
||||
| `marker` | string | No | Pagination marker from a previous request |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `roles` | json | List of IAM roles with roleName, roleId, arn, path, and dates |
|
||||
| `isTruncated` | boolean | Whether there are more results available |
|
||||
| `marker` | string | Pagination marker for the next page of results |
|
||||
| `count` | number | Number of roles returned |
|
||||
|
||||
### `iam_get_role`
|
||||
|
||||
Get detailed information about an IAM role
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleName` | string | Yes | The name of the IAM role to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `roleName` | string | The name of the role |
|
||||
| `roleId` | string | The unique ID of the role |
|
||||
| `arn` | string | The ARN of the role |
|
||||
| `path` | string | The path to the role |
|
||||
| `createDate` | string | Date the role was created |
|
||||
| `description` | string | Description of the role |
|
||||
| `maxSessionDuration` | number | Maximum session duration in seconds |
|
||||
| `assumeRolePolicyDocument` | string | The trust policy document \(JSON\) |
|
||||
| `roleLastUsedDate` | string | Date the role was last used |
|
||||
| `roleLastUsedRegion` | string | AWS region where the role was last used |
|
||||
|
||||
### `iam_create_role`
|
||||
|
||||
Create a new IAM role with a trust policy
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleName` | string | Yes | Name for the new IAM role \(1-64 characters\) |
|
||||
| `assumeRolePolicyDocument` | string | Yes | Trust policy JSON specifying who can assume this role |
|
||||
| `description` | string | No | Description of the role |
|
||||
| `path` | string | No | Path for the role \(e.g., /application/\), defaults to / |
|
||||
| `maxSessionDuration` | number | No | Maximum session duration in seconds \(3600-43200, default 3600\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
| `roleName` | string | The name of the created role |
|
||||
| `roleId` | string | The unique ID of the created role |
|
||||
| `arn` | string | The ARN of the created role |
|
||||
| `path` | string | The path of the created role |
|
||||
| `createDate` | string | Date the role was created |
|
||||
|
||||
### `iam_delete_role`
|
||||
|
||||
Delete an IAM role
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleName` | string | Yes | The name of the IAM role to delete |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_attach_user_policy`
|
||||
|
||||
Attach a managed policy to an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user |
|
||||
| `policyArn` | string | Yes | The ARN of the managed policy to attach |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_detach_user_policy`
|
||||
|
||||
Remove a managed policy from an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user |
|
||||
| `policyArn` | string | Yes | The ARN of the managed policy to detach |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_attach_role_policy`
|
||||
|
||||
Attach a managed policy to an IAM role
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleName` | string | Yes | The name of the IAM role |
|
||||
| `policyArn` | string | Yes | The ARN of the managed policy to attach |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_detach_role_policy`
|
||||
|
||||
Remove a managed policy from an IAM role
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleName` | string | Yes | The name of the IAM role |
|
||||
| `policyArn` | string | Yes | The ARN of the managed policy to detach |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_list_policies`
|
||||
|
||||
List managed IAM policies
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `scope` | string | No | Filter by scope: All, AWS \(AWS-managed\), or Local \(customer-managed\) |
|
||||
| `onlyAttached` | boolean | No | If true, only return policies attached to an entity |
|
||||
| `pathPrefix` | string | No | Path prefix to filter policies |
|
||||
| `maxItems` | number | No | Maximum number of policies to return \(1-1000, default 100\) |
|
||||
| `marker` | string | No | Pagination marker from a previous request |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `policies` | json | List of policies with policyName, arn, attachmentCount, and dates |
|
||||
| `isTruncated` | boolean | Whether there are more results available |
|
||||
| `marker` | string | Pagination marker for the next page of results |
|
||||
| `count` | number | Number of policies returned |
|
||||
|
||||
### `iam_create_access_key`
|
||||
|
||||
Create a new access key pair for an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | No | The IAM user to create the key for \(defaults to current user\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
| `accessKeyId` | string | The new access key ID |
|
||||
| `secretAccessKey` | string | The new secret access key \(only shown once\) |
|
||||
| `userName` | string | The user the key was created for |
|
||||
| `status` | string | Status of the access key \(Active\) |
|
||||
| `createDate` | string | Date the key was created |
|
||||
|
||||
### `iam_delete_access_key`
|
||||
|
||||
Delete an access key pair for an IAM user
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `accessKeyIdToDelete` | string | Yes | The access key ID to delete |
|
||||
| `userName` | string | No | The IAM user whose key to delete \(defaults to current user\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_list_groups`
|
||||
|
||||
List IAM groups in your AWS account
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `pathPrefix` | string | No | Path prefix to filter groups |
|
||||
| `maxItems` | number | No | Maximum number of groups to return \(1-1000, default 100\) |
|
||||
| `marker` | string | No | Pagination marker from a previous request |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `groups` | json | List of IAM groups with groupName, groupId, arn, and path |
|
||||
| `isTruncated` | boolean | Whether there are more results available |
|
||||
| `marker` | string | Pagination marker for the next page of results |
|
||||
| `count` | number | Number of groups returned |
|
||||
|
||||
### `iam_add_user_to_group`
|
||||
|
||||
Add an IAM user to a group
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user |
|
||||
| `groupName` | string | Yes | The name of the IAM group |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
### `iam_remove_user_from_group`
|
||||
|
||||
Remove an IAM user from a group
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `userName` | string | Yes | The name of the IAM user |
|
||||
| `groupName` | string | Yes | The name of the IAM group |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `message` | string | Operation status message |
|
||||
|
||||
|
||||
@@ -251,7 +251,7 @@ Update a Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `issueKey` | string | Yes | Jira issue key to update \(e.g., PROJ-123\) |
|
||||
| `summary` | string | No | New summary for the issue |
|
||||
| `description` | string | No | New description for the issue |
|
||||
| `description` | string | No | New description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | New priority ID or name for the issue \(e.g., "High"\) |
|
||||
| `assignee` | string | No | New assignee account ID for the issue |
|
||||
| `labels` | json | No | Labels to set on the issue \(array of label name strings\) |
|
||||
@@ -284,7 +284,7 @@ Create a new Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `projectId` | string | Yes | Jira project key \(e.g., PROJ\) |
|
||||
| `summary` | string | Yes | Summary for the issue |
|
||||
| `description` | string | No | Description for the issue |
|
||||
| `description` | string | No | Description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | Priority ID or name for the issue \(e.g., "10000" or "High"\) |
|
||||
| `assignee` | string | No | Assignee account ID for the issue |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. |
|
||||
|
||||
@@ -113,10 +113,11 @@ Create a new service request in Jira Service Management
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `serviceDeskId` | string | Yes | Service Desk ID \(e.g., "1", "2"\) |
|
||||
| `requestTypeId` | string | Yes | Request Type ID \(e.g., "10", "15"\) |
|
||||
| `summary` | string | Yes | Summary/title for the service request |
|
||||
| `summary` | string | No | Summary/title for the service request \(required unless using Form Answers\) |
|
||||
| `description` | string | No | Description for the service request |
|
||||
| `raiseOnBehalfOf` | string | No | Account ID of customer to raise request on behalf of |
|
||||
| `requestFieldValues` | json | No | Request field values as key-value pairs \(overrides summary/description if provided\) |
|
||||
| `formAnswers` | json | No | Form answers using numeric form question IDs as keys \(e.g., \{"1": \{"text": "Title"\}, "4": \{"choices": \["5"\]\}\}\). Keys are question IDs from the Jira Form, not Jira field names. |
|
||||
| `requestParticipants` | string | No | Comma-separated account IDs to add as request participants |
|
||||
| `channel` | string | No | Channel the request originates from \(e.g., portal, email\) |
|
||||
|
||||
@@ -677,4 +678,315 @@ Get the fields required to create a request of a specific type in Jira Service M
|
||||
| ↳ `defaultValues` | json | Default values for the field |
|
||||
| ↳ `jiraSchema` | json | Jira field schema with type, system, custom, customId |
|
||||
|
||||
### `jsm_get_form_templates`
|
||||
|
||||
List forms (ProForma/JSM Forms) in a Jira project to discover form IDs for request types
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `projectIdOrKey` | string | Yes | Jira project ID or key \(e.g., "10001" or "SD"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `projectIdOrKey` | string | Project ID or key |
|
||||
| `templates` | array | List of forms in the project |
|
||||
| ↳ `id` | string | Form template ID \(UUID\) |
|
||||
| ↳ `name` | string | Form template name |
|
||||
| ↳ `updated` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| ↳ `issueCreateIssueTypeIds` | json | Issue type IDs that auto-attach this form on issue create |
|
||||
| ↳ `issueCreateRequestTypeIds` | json | Request type IDs that auto-attach this form on issue create |
|
||||
| ↳ `portalRequestTypeIds` | json | Request type IDs that show this form on the customer portal |
|
||||
| ↳ `recommendedIssueRequestTypeIds` | json | Request type IDs that recommend this form |
|
||||
| `total` | number | Total number of forms |
|
||||
|
||||
### `jsm_get_form_structure`
|
||||
|
||||
Get the full structure of a ProForma/JSM form including all questions, field types, choices, layout, and conditions
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `projectIdOrKey` | string | Yes | Jira project ID or key \(e.g., "10001" or "SD"\) |
|
||||
| `formId` | string | Yes | Form ID \(UUID from Get Form Templates\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `projectIdOrKey` | string | Project ID or key |
|
||||
| `formId` | string | Form ID |
|
||||
| `design` | json | Full form design with questions \(field types, labels, choices, validation\), layout \(field ordering\), and conditions |
|
||||
| `updated` | string | Last updated timestamp |
|
||||
| `publish` | json | Publishing and request type configuration |
|
||||
|
||||
### `jsm_get_issue_forms`
|
||||
|
||||
List forms (ProForma/JSM Forms) attached to a Jira issue with metadata (name, submitted status, lock)
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123", "10001"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `forms` | array | List of forms attached to the issue |
|
||||
| ↳ `id` | string | Form instance ID \(UUID\) |
|
||||
| ↳ `name` | string | Form name |
|
||||
| ↳ `updated` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| ↳ `submitted` | boolean | Whether the form has been submitted |
|
||||
| ↳ `lock` | boolean | Whether the form is locked |
|
||||
| ↳ `internal` | boolean | Whether the form is internal-only |
|
||||
| ↳ `formTemplateId` | string | Source form template ID \(UUID\) |
|
||||
| `total` | number | Total number of forms |
|
||||
|
||||
### `jsm_attach_form`
|
||||
|
||||
Attach a form template to an existing Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key to attach the form to \(e.g., "SD-123"\) |
|
||||
| `formTemplateId` | string | Yes | Form template UUID \(from Get Form Templates\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `id` | string | Attached form instance ID \(UUID\) |
|
||||
| `name` | string | Form name |
|
||||
| `updated` | string | Last updated timestamp |
|
||||
| `submitted` | boolean | Whether the form has been submitted |
|
||||
| `lock` | boolean | Whether the form is locked |
|
||||
| `internal` | boolean | Whether the form is internal-only |
|
||||
| `formTemplateId` | string | Form template ID |
|
||||
|
||||
### `jsm_save_form_answers`
|
||||
|
||||
Save answers to a form attached to a Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID \(from Attach Form or Get Issue Forms\) |
|
||||
| `answers` | json | Yes | Form answers using numeric question IDs as keys \(e.g., \{"1": \{"text": "Title"\}, "4": \{"choices": \["5"\]\}\}\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `state` | json | Form state with status \(open, submitted, locked\) |
|
||||
| `updated` | string | Last updated timestamp |
|
||||
|
||||
### `jsm_submit_form`
|
||||
|
||||
Submit a form on a Jira issue or JSM request, locking it from further edits
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID \(from Attach Form or Get Issue Forms\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `status` | string | Form status after submission \(open, submitted, locked\) |
|
||||
|
||||
### `jsm_get_form`
|
||||
|
||||
Get a single form with full design, state, and answers from a Jira issue
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID \(from Attach Form or Get Issue Forms\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `design` | json | Full form design with questions, layout, conditions, sections, settings |
|
||||
| `state` | json | Form state with answers map, status \(o=open, s=submitted, l=locked\), visibility \(i=internal, e=external\) |
|
||||
| `updated` | string | Last updated timestamp |
|
||||
|
||||
### `jsm_get_form_answers`
|
||||
|
||||
Get simplified answers from a form attached to a Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID \(from Attach Form or Get Issue Forms\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `answers` | json | Simplified form answers as key-value pairs \(question label to answer text/choices\) |
|
||||
|
||||
### `jsm_reopen_form`
|
||||
|
||||
Reopen a submitted form on a Jira issue or JSM request, allowing further edits
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID \(from Get Issue Forms\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `status` | string | Form status after reopening \(open, submitted, locked\) |
|
||||
|
||||
### `jsm_delete_form`
|
||||
|
||||
Remove a form from a Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID to delete |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Deleted form instance UUID |
|
||||
| `deleted` | boolean | Whether the form was successfully deleted |
|
||||
|
||||
### `jsm_externalise_form`
|
||||
|
||||
Make a form visible to customers on a Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `visibility` | string | Form visibility after change \(internal or external\) |
|
||||
|
||||
### `jsm_internalise_form`
|
||||
|
||||
Make a form internal-only (not visible to customers) on a Jira issue or JSM request
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `issueIdOrKey` | string | Yes | Issue ID or key \(e.g., "SD-123"\) |
|
||||
| `formId` | string | Yes | Form instance UUID |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `issueIdOrKey` | string | Issue ID or key |
|
||||
| `formId` | string | Form instance UUID |
|
||||
| `visibility` | string | Form visibility after change \(internal or external\) |
|
||||
|
||||
### `jsm_copy_forms`
|
||||
|
||||
Copy forms from one Jira issue to another
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance |
|
||||
| `sourceIssueIdOrKey` | string | Yes | Source issue ID or key to copy forms from \(e.g., "SD-123"\) |
|
||||
| `targetIssueIdOrKey` | string | Yes | Target issue ID or key to copy forms to \(e.g., "SD-456"\) |
|
||||
| `formIds` | json | No | Optional JSON array of form UUIDs to copy \(e.g., \["uuid1", "uuid2"\]\). If omitted, copies all forms. |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `ts` | string | Timestamp of the operation |
|
||||
| `sourceIssueIdOrKey` | string | Source issue ID or key |
|
||||
| `targetIssueIdOrKey` | string | Target issue ID or key |
|
||||
| `copiedForms` | json | Array of successfully copied forms |
|
||||
| `errors` | json | Array of errors encountered during copy |
|
||||
|
||||
|
||||
|
||||
@@ -3,6 +3,7 @@
|
||||
"index",
|
||||
"a2a",
|
||||
"agentmail",
|
||||
"agiloft",
|
||||
"ahrefs",
|
||||
"airtable",
|
||||
"airweave",
|
||||
@@ -17,6 +18,7 @@
|
||||
"attio",
|
||||
"box",
|
||||
"brandfetch",
|
||||
"brightdata",
|
||||
"browser_use",
|
||||
"calcom",
|
||||
"calendly",
|
||||
@@ -27,6 +29,7 @@
|
||||
"cloudformation",
|
||||
"cloudwatch",
|
||||
"confluence",
|
||||
"crowdstrike",
|
||||
"cursor",
|
||||
"dagster",
|
||||
"databricks",
|
||||
@@ -81,6 +84,7 @@
|
||||
"hubspot",
|
||||
"huggingface",
|
||||
"hunter",
|
||||
"iam",
|
||||
"image_generator",
|
||||
"imap",
|
||||
"incidentio",
|
||||
@@ -159,6 +163,7 @@
|
||||
"ssh",
|
||||
"stagehand",
|
||||
"stripe",
|
||||
"sts",
|
||||
"stt",
|
||||
"supabase",
|
||||
"table",
|
||||
|
||||
@@ -45,6 +45,7 @@ Read data from a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to read from \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to read from. Accepts "SheetName!A1:B2" for explicit ranges or just "SheetName" to read the used range of that sheet. If omitted, reads the used range of the first sheet. |
|
||||
|
||||
#### Output
|
||||
@@ -67,6 +68,7 @@ Write data to a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to write to \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to write to \(e.g., "Sheet1!A1:B2"\) |
|
||||
| `values` | array | Yes | The data to write as a 2D array \(e.g., \[\["Name", "Age"\], \["Alice", 30\]\]\) or array of objects |
|
||||
| `valueInputOption` | string | No | The format of the data to write |
|
||||
|
||||
@@ -314,8 +314,8 @@ Cancel an order in your Shopify store
|
||||
| `orderId` | string | Yes | Order ID to cancel \(gid://shopify/Order/123456789\) |
|
||||
| `reason` | string | Yes | Cancellation reason \(CUSTOMER, DECLINED, FRAUD, INVENTORY, STAFF, OTHER\) |
|
||||
| `notifyCustomer` | boolean | No | Whether to notify the customer about the cancellation |
|
||||
| `refund` | boolean | No | Whether to refund the order |
|
||||
| `restock` | boolean | No | Whether to restock the inventory |
|
||||
| `restock` | boolean | Yes | Whether to restock the inventory committed to the order |
|
||||
| `refundMethod` | json | No | Optional refund method object, for example \{"originalPaymentMethodsRefund": true\} |
|
||||
| `staffNote` | string | No | A note about the cancellation for staff reference |
|
||||
|
||||
#### Output
|
||||
|
||||
128
apps/docs/content/docs/en/tools/sts.mdx
Normal file
128
apps/docs/content/docs/en/tools/sts.mdx
Normal file
@@ -0,0 +1,128 @@
|
||||
---
|
||||
title: AWS STS
|
||||
description: Connect to AWS Security Token Service
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="sts"
|
||||
color="linear-gradient(45deg, #BD0816 0%, #FF5252 100%)"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[AWS Security Token Service (STS)](https://docs.aws.amazon.com/STS/latest/APIReference/welcome.html) is a web service that enables you to request temporary, limited-privilege credentials for AWS Identity and Access Management (IAM) users or for users that you authenticate (federated users).
|
||||
|
||||
With AWS STS, you can:
|
||||
|
||||
- **Assume IAM roles**: Request temporary credentials to access AWS resources across accounts or with elevated permissions
|
||||
- **Verify identity**: Determine the AWS account, ARN, and user ID associated with the calling credentials
|
||||
- **Generate session tokens**: Obtain temporary credentials with optional MFA protection for enhanced security
|
||||
- **Audit access keys**: Look up the AWS account that owns a given access key for security investigations
|
||||
|
||||
In Sim, the AWS STS integration allows your agents to manage temporary credentials as part of automated workflows. This is useful for cross-account access patterns, credential rotation, identity verification before sensitive operations, and security auditing. Agents can assume roles to interact with other AWS services, verify their own identity, or look up access key ownership without exposing long-lived credentials.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate AWS STS into the workflow. Assume roles, get temporary credentials, verify caller identity, and look up access key information.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `sts_assume_role`
|
||||
|
||||
Assume an IAM role and receive temporary security credentials
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `roleArn` | string | Yes | ARN of the IAM role to assume |
|
||||
| `roleSessionName` | string | Yes | Identifier for the assumed role session |
|
||||
| `durationSeconds` | number | No | Duration of the session in seconds \(900-43200, default 3600\) |
|
||||
| `externalId` | string | No | External ID for cross-account access |
|
||||
| `serialNumber` | string | No | MFA device serial number or ARN |
|
||||
| `tokenCode` | string | No | MFA token code \(6 digits\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `accessKeyId` | string | Temporary access key ID |
|
||||
| `secretAccessKey` | string | Temporary secret access key |
|
||||
| `sessionToken` | string | Temporary session token |
|
||||
| `expiration` | string | Credential expiration timestamp |
|
||||
| `assumedRoleArn` | string | ARN of the assumed role |
|
||||
| `assumedRoleId` | string | Assumed role ID with session name |
|
||||
| `packedPolicySize` | number | Percentage of allowed policy size used |
|
||||
|
||||
### `sts_get_caller_identity`
|
||||
|
||||
Get details about the IAM user or role whose credentials are used to call the API
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `account` | string | AWS account ID |
|
||||
| `arn` | string | ARN of the calling entity |
|
||||
| `userId` | string | Unique identifier of the calling entity |
|
||||
|
||||
### `sts_get_session_token`
|
||||
|
||||
Get temporary security credentials for an IAM user, optionally with MFA
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `durationSeconds` | number | No | Duration of the session in seconds \(900-129600, default 43200\) |
|
||||
| `serialNumber` | string | No | MFA device serial number or ARN |
|
||||
| `tokenCode` | string | No | MFA token code \(6 digits\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `accessKeyId` | string | Temporary access key ID |
|
||||
| `secretAccessKey` | string | Temporary secret access key |
|
||||
| `sessionToken` | string | Temporary session token |
|
||||
| `expiration` | string | Credential expiration timestamp |
|
||||
|
||||
### `sts_get_access_key_info`
|
||||
|
||||
Get the AWS account ID associated with an access key
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `region` | string | Yes | AWS region \(e.g., us-east-1\) |
|
||||
| `accessKeyId` | string | Yes | AWS access key ID |
|
||||
| `secretAccessKey` | string | Yes | AWS secret access key |
|
||||
| `targetAccessKeyId` | string | Yes | The access key ID to look up |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `account` | string | AWS account ID that owns the access key |
|
||||
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
---
|
||||
title: Trello
|
||||
description: Manage Trello boards and cards
|
||||
description: Manage Trello lists, cards, and activity
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
@@ -28,7 +28,16 @@ Integrating Trello with Sim empowers your agents to manage your team’s tasks,
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate with Trello to manage boards and cards. List boards, list cards, create cards, update cards, get actions, and add comments.
|
||||
{/* MANUAL-CONTENT-START:usage */}
|
||||
### Trello OAuth Setup
|
||||
|
||||
Before connecting Trello in Sim, add your Sim app origin to the **Allowed Origins** list for your Trello API key in the Trello Power-Up admin settings.
|
||||
|
||||
Trello's authorization flow redirects back to Sim using a `return_url`. If your Sim origin is not whitelisted in Trello, Trello will block the redirect and the connection flow will fail before Sim can save the token.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
Integrate with Trello to list board lists, list cards, create cards, update cards, review activity, and add comments.
|
||||
|
||||
|
||||
|
||||
@@ -48,48 +57,82 @@ List all lists on a Trello board
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `lists` | array | Array of list objects with id, name, closed, pos, and idBoard |
|
||||
| `lists` | array | Lists on the selected board |
|
||||
| ↳ `id` | string | List ID |
|
||||
| ↳ `name` | string | List name |
|
||||
| ↳ `closed` | boolean | Whether the list is archived |
|
||||
| ↳ `pos` | number | List position on the board |
|
||||
| ↳ `idBoard` | string | Board ID containing the list |
|
||||
| `count` | number | Number of lists returned |
|
||||
|
||||
### `trello_list_cards`
|
||||
|
||||
List all cards on a Trello board
|
||||
List cards from a Trello board or list
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `boardId` | string | Yes | Trello board ID \(24-character hex string\) |
|
||||
| `listId` | string | No | Trello list ID to filter cards \(24-character hex string\) |
|
||||
| `boardId` | string | No | Trello board ID to list open cards from. Provide either boardId or listId |
|
||||
| `listId` | string | No | Trello list ID to list cards from. Provide either boardId or listId |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `cards` | array | Array of card objects with id, name, desc, url, board/list IDs, labels, and due date |
|
||||
| `cards` | array | Cards returned from the selected Trello board or list |
|
||||
| ↳ `id` | string | Card ID |
|
||||
| ↳ `name` | string | Card name |
|
||||
| ↳ `desc` | string | Card description |
|
||||
| ↳ `url` | string | Full card URL |
|
||||
| ↳ `idBoard` | string | Board ID containing the card |
|
||||
| ↳ `idList` | string | List ID containing the card |
|
||||
| ↳ `closed` | boolean | Whether the card is archived |
|
||||
| ↳ `labelIds` | array | Label IDs applied to the card |
|
||||
| ↳ `labels` | array | Labels applied to the card |
|
||||
| ↳ `id` | string | Label ID |
|
||||
| ↳ `name` | string | Label name |
|
||||
| ↳ `color` | string | Label color |
|
||||
| ↳ `due` | string | Card due date in ISO 8601 format |
|
||||
| ↳ `dueComplete` | boolean | Whether the due date is complete |
|
||||
| `count` | number | Number of cards returned |
|
||||
|
||||
### `trello_create_card`
|
||||
|
||||
Create a new card on a Trello board
|
||||
Create a new card in a Trello list
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `boardId` | string | Yes | Trello board ID \(24-character hex string\) |
|
||||
| `listId` | string | Yes | Trello list ID \(24-character hex string\) |
|
||||
| `name` | string | Yes | Name/title of the card |
|
||||
| `desc` | string | No | Description of the card |
|
||||
| `pos` | string | No | Position of the card \(top, bottom, or positive float\) |
|
||||
| `due` | string | No | Due date \(ISO 8601 format\) |
|
||||
| `labels` | string | No | Comma-separated list of label IDs \(24-character hex strings\) |
|
||||
| `dueComplete` | boolean | No | Whether the due date should be marked complete |
|
||||
| `labelIds` | array | No | Label IDs to attach to the card |
|
||||
| `items` | string | No | A Trello label ID |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `card` | object | The created card object with id, name, desc, url, and other properties |
|
||||
| `card` | json | Created card \(id, name, desc, url, idBoard, idList, closed, labelIds, labels, due, dueComplete\) |
|
||||
| ↳ `id` | string | Card ID |
|
||||
| ↳ `name` | string | Card name |
|
||||
| ↳ `desc` | string | Card description |
|
||||
| ↳ `url` | string | Full card URL |
|
||||
| ↳ `idBoard` | string | Board ID containing the card |
|
||||
| ↳ `idList` | string | List ID containing the card |
|
||||
| ↳ `closed` | boolean | Whether the card is archived |
|
||||
| ↳ `labelIds` | array | Label IDs applied to the card |
|
||||
| ↳ `labels` | array | Labels applied to the card |
|
||||
| ↳ `id` | string | Label ID |
|
||||
| ↳ `name` | string | Label name |
|
||||
| ↳ `color` | string | Label color |
|
||||
| ↳ `due` | string | Card due date in ISO 8601 format |
|
||||
| ↳ `dueComplete` | boolean | Whether the due date is complete |
|
||||
|
||||
### `trello_update_card`
|
||||
|
||||
@@ -111,7 +154,21 @@ Update an existing card on Trello
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `card` | object | The updated card object with id, name, desc, url, and other properties |
|
||||
| `card` | json | Updated card \(id, name, desc, url, idBoard, idList, closed, labelIds, labels, due, dueComplete\) |
|
||||
| ↳ `id` | string | Card ID |
|
||||
| ↳ `name` | string | Card name |
|
||||
| ↳ `desc` | string | Card description |
|
||||
| ↳ `url` | string | Full card URL |
|
||||
| ↳ `idBoard` | string | Board ID containing the card |
|
||||
| ↳ `idList` | string | List ID containing the card |
|
||||
| ↳ `closed` | boolean | Whether the card is archived |
|
||||
| ↳ `labelIds` | array | Label IDs applied to the card |
|
||||
| ↳ `labels` | array | Labels applied to the card |
|
||||
| ↳ `id` | string | Label ID |
|
||||
| ↳ `name` | string | Label name |
|
||||
| ↳ `color` | string | Label color |
|
||||
| ↳ `due` | string | Card due date in ISO 8601 format |
|
||||
| ↳ `dueComplete` | boolean | Whether the due date is complete |
|
||||
|
||||
### `trello_get_actions`
|
||||
|
||||
@@ -124,13 +181,36 @@ Get activity/actions from a board or card
|
||||
| `boardId` | string | No | Trello board ID \(24-character hex string\). Either boardId or cardId required |
|
||||
| `cardId` | string | No | Trello card ID \(24-character hex string\). Either boardId or cardId required |
|
||||
| `filter` | string | No | Filter actions by type \(e.g., "commentCard,updateCard,createCard" or "all"\) |
|
||||
| `limit` | number | No | Maximum number of actions to return \(default: 50, max: 1000\) |
|
||||
| `limit` | number | No | Maximum number of board actions to return |
|
||||
| `page` | number | No | Page number for action results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `actions` | array | Array of action objects with type, date, member, and data |
|
||||
| `actions` | array | Action items \(id, type, date, idMemberCreator, text, memberCreator, card, board, list\) |
|
||||
| ↳ `id` | string | Action ID |
|
||||
| ↳ `type` | string | Action type |
|
||||
| ↳ `date` | string | Action timestamp |
|
||||
| ↳ `idMemberCreator` | string | ID of the member who created the action |
|
||||
| ↳ `text` | string | Comment text when present |
|
||||
| ↳ `memberCreator` | object | Member who created the action |
|
||||
| ↳ `id` | string | Member ID |
|
||||
| ↳ `fullName` | string | Member full name |
|
||||
| ↳ `username` | string | Member username |
|
||||
| ↳ `card` | object | Card referenced by the action |
|
||||
| ↳ `id` | string | Card ID |
|
||||
| ↳ `name` | string | Card name |
|
||||
| ↳ `shortLink` | string | Short card link |
|
||||
| ↳ `idShort` | number | Board-local card number |
|
||||
| ↳ `due` | string | Card due date |
|
||||
| ↳ `board` | object | Board referenced by the action |
|
||||
| ↳ `id` | string | Board ID |
|
||||
| ↳ `name` | string | Board name |
|
||||
| ↳ `shortLink` | string | Short board link |
|
||||
| ↳ `list` | object | List referenced by the action |
|
||||
| ↳ `id` | string | List ID |
|
||||
| ↳ `name` | string | List name |
|
||||
| `count` | number | Number of actions returned |
|
||||
|
||||
### `trello_add_comment`
|
||||
@@ -148,6 +228,28 @@ Add a comment to a Trello card
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `comment` | object | The created comment object with id, text, date, and member creator |
|
||||
| `comment` | json | Created comment action \(id, type, date, idMemberCreator, text, memberCreator, card, board, list\) |
|
||||
| ↳ `id` | string | Action ID |
|
||||
| ↳ `type` | string | Action type |
|
||||
| ↳ `date` | string | Action timestamp |
|
||||
| ↳ `idMemberCreator` | string | ID of the member who created the comment |
|
||||
| ↳ `text` | string | Comment text |
|
||||
| ↳ `memberCreator` | object | Member who created the comment |
|
||||
| ↳ `id` | string | Member ID |
|
||||
| ↳ `fullName` | string | Member full name |
|
||||
| ↳ `username` | string | Member username |
|
||||
| ↳ `card` | object | Card referenced by the comment |
|
||||
| ↳ `id` | string | Card ID |
|
||||
| ↳ `name` | string | Card name |
|
||||
| ↳ `shortLink` | string | Short card link |
|
||||
| ↳ `idShort` | number | Board-local card number |
|
||||
| ↳ `due` | string | Card due date |
|
||||
| ↳ `board` | object | Board referenced by the comment |
|
||||
| ↳ `id` | string | Board ID |
|
||||
| ↳ `name` | string | Board name |
|
||||
| ↳ `shortLink` | string | Short board link |
|
||||
| ↳ `list` | object | List referenced by the comment |
|
||||
| ↳ `id` | string | List ID |
|
||||
| ↳ `name` | string | List name |
|
||||
|
||||
|
||||
|
||||
@@ -34,15 +34,16 @@ Integrate WhatsApp into the workflow. Can send messages.
|
||||
|
||||
### `whatsapp_send_message`
|
||||
|
||||
Send WhatsApp messages
|
||||
Send a text message through the WhatsApp Cloud API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `phoneNumber` | string | Yes | Recipient phone number with country code \(e.g., +14155552671\) |
|
||||
| `message` | string | Yes | Message content to send \(plain text or template content\) |
|
||||
| `message` | string | Yes | Plain text message content to send |
|
||||
| `phoneNumberId` | string | Yes | WhatsApp Business Phone Number ID \(from Meta Business Suite\) |
|
||||
| `previewUrl` | boolean | No | Whether WhatsApp should try to render a link preview for the first URL in the message |
|
||||
|
||||
#### Output
|
||||
|
||||
@@ -50,8 +51,12 @@ Send WhatsApp messages
|
||||
| --------- | ---- | ----------- |
|
||||
| `success` | boolean | WhatsApp message send success status |
|
||||
| `messageId` | string | Unique WhatsApp message identifier |
|
||||
| `phoneNumber` | string | Recipient phone number |
|
||||
| `status` | string | Message delivery status |
|
||||
| `timestamp` | string | Message send timestamp |
|
||||
| `messageStatus` | string | Initial delivery state returned by the API |
|
||||
| `messagingProduct` | string | Messaging product returned by the API |
|
||||
| `inputPhoneNumber` | string | Recipient phone number echoed back by WhatsApp |
|
||||
| `whatsappUserId` | string | WhatsApp user ID resolved for the recipient |
|
||||
| `contacts` | array | Recipient contact records returned by WhatsApp |
|
||||
| ↳ `input` | string | Input phone number sent to the API |
|
||||
| ↳ `wa_id` | string | WhatsApp user ID associated with the recipient |
|
||||
|
||||
|
||||
|
||||
@@ -29,7 +29,7 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
Receive external webhook payloads
|
||||
</Card>
|
||||
<Card title="Schedule" href="/triggers/schedule">
|
||||
Cron or interval based execution
|
||||
Cron or interval based runs
|
||||
</Card>
|
||||
<Card title="RSS Feed" href="/triggers/rss">
|
||||
Monitor RSS and Atom feeds for new content
|
||||
@@ -59,17 +59,17 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
|
||||
> Deployments power every trigger. Update the workflow, redeploy, and all trigger entry points pick up the new snapshot. Learn more in [Execution → Deployment Snapshots](/execution).
|
||||
|
||||
## Manual Execution Priority
|
||||
## Manual Run Priority
|
||||
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to execute based on the following priority order:
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to run based on the following priority order:
|
||||
|
||||
1. **Start Block** (highest priority)
|
||||
2. **Schedule Triggers**
|
||||
3. **External Triggers** (webhooks, integrations like Slack, Gmail, Airtable, etc.)
|
||||
|
||||
If your workflow has multiple triggers, the highest priority trigger will be executed. For example, if you have both a Start block and a Webhook trigger, clicking Run will execute the Start block.
|
||||
If your workflow has multiple triggers, the highest priority trigger will be used. For example, if you have both a Start block and a Webhook trigger, clicking Run will use the Start block.
|
||||
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are executed manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are run manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
|
||||
## Email Polling Groups
|
||||
|
||||
@@ -94,10 +94,10 @@ Invitees receive an email with a link to connect their account. Once connected,
|
||||
When configuring an email trigger, select your polling group from the credentials dropdown instead of an individual account. The system creates webhooks for each member and routes all emails through your workflow.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim executes the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim uses the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "How do I secure my webhook endpoint?", answer: "The Generic Webhook trigger supports authentication. Enable the Require Authentication toggle, set an auth token, and optionally specify a custom header name. Incoming requests must include the token as a Bearer token in the Authorization header (or in your custom header). Requests without a valid token are rejected." },
|
||||
{ question: "What happens when I test an external trigger manually?", answer: "When you click Run on a workflow with an external trigger (webhook, Slack, Gmail, etc.), Sim generates a mock payload based on the trigger's expected data structure. This lets downstream blocks resolve their variable references correctly so you can test the full workflow without waiting for a real event." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based executions (API, chat, schedule, webhook) run against the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based runs (API, chat, schedule, webhook) use the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "What integrations are available as triggers?", answer: "Sim supports a wide range of trigger integrations including GitHub (push, PR, issues), Slack, Gmail, Outlook, Linear, Jira, HubSpot, Stripe, Airtable, Calendly, Typeform, Telegram, WhatsApp, Microsoft Teams, RSS feeds, and more. Each integration provides event-specific triggers like issue_created or email_received." },
|
||||
{ question: "How does the Schedule trigger work?", answer: "The Schedule trigger runs your workflow on a timer using cron expressions or interval-based configuration. The schedule is managed within the schedule block settings. Like all triggers, scheduled runs execute the active deployment snapshot, so make sure to redeploy after making workflow changes." },
|
||||
]} />
|
||||
|
||||
@@ -51,9 +51,9 @@ RSS triggers only fire for items published after you save the trigger. Existing
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How often is the RSS feed checked for new items?", answer: "The feed is polled every minute. On each poll, the service fetches the feed, compares items against the last checked timestamp and a list of previously seen GUIDs, and triggers your workflow only for genuinely new items." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow executions for the same item." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow runs for the same item." },
|
||||
{ question: "Is there a limit on how many new items are processed per poll?", answer: "Yes. Each polling cycle processes a maximum of 25 new items, sorted by publication date (newest first). If a feed publishes more than 25 items between polls, only the 25 most recent are processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered execution receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered run receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What happens if the RSS feed is temporarily unreachable?", answer: "A failed fetch increments the webhook's consecutive failure counter. After 100 consecutive failures, the RSS trigger is automatically disabled. On any successful poll, the counter resets to zero." },
|
||||
{ question: "Does the RSS trigger support Atom feeds?", answer: "Yes. The underlying parser (rss-parser) supports both RSS and Atom feed formats. You can use the URL of either format in the Feed URL field." },
|
||||
]} />
|
||||
|
||||
@@ -79,10 +79,10 @@ Schedule blocks cannot receive incoming connections and serve as workflow entry
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Do I need to deploy my workflow for the schedule to start?", answer: "Yes. Schedules are created in the database only when you deploy the workflow. Undeploying removes the schedule, and redeploying recreates it with the current configuration." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful execution." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful run." },
|
||||
{ question: "Does the schedule support timezones?", answer: "Yes. The schedule configuration includes a timezone setting. Cron expressions and simple intervals are evaluated relative to the configured timezone, which defaults to UTC if not specified." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during execution, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during a run, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "Can I have multiple schedule blocks in one workflow?", answer: "Yes. The deployment process finds all schedule blocks in the workflow and creates a separate schedule record for each one. Each schedule operates independently with its own cron expression and failure counter." },
|
||||
{ question: "What happens if the workflow is undeployed while a schedule execution is in progress?", answer: "The currently running execution will complete, but no new executions will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before executing." },
|
||||
{ question: "What happens if the workflow is undeployed while a scheduled run is in progress?", answer: "The currently running workflow will complete, but no new runs will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before running." },
|
||||
]} />
|
||||
|
||||
|
||||
@@ -19,12 +19,12 @@ The Start block is the default trigger for workflows built in Sim. It collects s
|
||||
</div>
|
||||
|
||||
<Callout type="info">
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven execution.
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven runs.
|
||||
</Callout>
|
||||
|
||||
## Fields exposed by Start
|
||||
|
||||
The Start block emits different data depending on the execution surface:
|
||||
The Start block emits different data depending on the run surface:
|
||||
|
||||
- **Input Format fields** — Every field you add becomes available as <code><start.fieldName></code>. For example, a `customerId` field shows up as <code><start.customerId></code> in downstream blocks and templates.
|
||||
- **Chat-only fields** — When the workflow runs from the chat side panel or a deployed chat experience, Sim also provides <code><start.input></code> (latest user message), <code><start.conversationId></code> (active session id), and <code><start.files></code> (chat attachments).
|
||||
@@ -33,11 +33,11 @@ Keep Input Format fields scoped to the names you expect to reference later—tho
|
||||
|
||||
## Configure the Input Format
|
||||
|
||||
Use the Input Format sub-block to define the schema that applies across execution modes:
|
||||
Use the Input Format sub-block to define the schema that applies across run modes:
|
||||
|
||||
1. Add a field for each value you want to collect.
|
||||
2. Choose a type (`string`, `number`, `boolean`, `object`, `array`, or `files`). File fields accept uploads from chat and API callers.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed executions.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed runs.
|
||||
4. Reorder fields to control how they appear in the editor form.
|
||||
|
||||
Reference structured values downstream with expressions such as <code><start.customerId></code> depending on the block you connect.
|
||||
@@ -53,7 +53,7 @@ Reference structured values downstream with expressions such as <code><start.
|
||||
tools or storage steps.
|
||||
</Tab>
|
||||
<Tab>
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the execution file upload endpoint before invoking the workflow.
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the file upload endpoint before invoking the workflow.
|
||||
|
||||
API callers can include additional optional properties. They are preserved
|
||||
inside <code><start.fieldName></code> outputs so you can experiment
|
||||
|
||||
@@ -8,7 +8,7 @@ import { Image } from '@/components/ui/image'
|
||||
import { Video } from '@/components/ui/video'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Webhooks allow external services to trigger workflow execution by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
Webhooks allow external services to trigger workflow runs by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
|
||||
## Generic Webhook Trigger
|
||||
|
||||
@@ -30,7 +30,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
2. **Configure Payload** - Set up the expected payload structure (optional)
|
||||
3. **Get Webhook URL** - Copy the automatically generated unique endpoint
|
||||
4. **External Integration** - Configure your external service to send POST requests to this URL
|
||||
5. **Workflow Execution** - Every request to the webhook URL triggers the workflow
|
||||
5. **Workflow Run** - Every request to the webhook URL triggers the workflow
|
||||
|
||||
### Features
|
||||
|
||||
@@ -38,7 +38,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
- **Automatic Parsing**: Webhook data is automatically parsed and available to subsequent blocks
|
||||
- **Authentication**: Optional bearer token or custom header authentication
|
||||
- **Rate Limiting**: Built-in protection against abuse
|
||||
- **Deduplication**: Prevents duplicate executions from repeated requests
|
||||
- **Deduplication**: Prevents duplicate runs from repeated requests
|
||||
|
||||
<Callout type="info">
|
||||
The Generic Webhook trigger fires every time the webhook URL receives a request, making it perfect for real-time integrations.
|
||||
@@ -58,7 +58,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
2. **Enable Trigger Mode** - Toggle "Use as Trigger" in the block settings
|
||||
3. **Configure Service** - Set up authentication and event filters specific to that service
|
||||
4. **Webhook Registration** - The service automatically registers the webhook with the external platform
|
||||
5. **Event-Based Execution** - Workflow triggers only for specific events from that service
|
||||
5. **Event-Based Runs** - Workflow triggers only for specific events from that service
|
||||
|
||||
### When to Use Each Approach
|
||||
|
||||
@@ -120,7 +120,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
### Testing Webhooks
|
||||
|
||||
1. Use tools like Postman or curl to test your webhook endpoints
|
||||
2. Check workflow execution logs for debugging
|
||||
2. Check workflow run logs for debugging
|
||||
3. Verify payload structure matches your expectations
|
||||
4. Test authentication and error scenarios
|
||||
|
||||
@@ -153,8 +153,8 @@ Always validate and sanitize incoming webhook data before processing it in your
|
||||
{ question: "What HTTP methods does the Generic Webhook endpoint accept?", answer: "The webhook endpoint handles POST requests for triggering workflows. GET requests are only used for provider-specific verification challenges (such as Microsoft Graph or WhatsApp verification). Other methods return a 405 Method Not Allowed response." },
|
||||
{ question: "How do I authenticate webhook requests?", answer: "Enable the Require Authentication toggle in the webhook configuration, then set an Authentication Token. Callers can send the token as a Bearer token in the Authorization header, or you can specify a custom header name (e.g., X-Secret-Key) and the token will be matched against that header instead." },
|
||||
{ question: "Can I define the expected payload structure for a webhook?", answer: "Yes. The Generic Webhook block includes an Input Format field where you can define the expected JSON schema. This is optional but helps document the expected structure. You can also use type \"file[]\" for file upload fields." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate executions from repeated requests with the same payload." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate runs from repeated requests with the same payload." },
|
||||
{ question: "What data from the webhook request is available in my workflow?", answer: "All request data including headers, body, and query parameters is parsed and made available to subsequent blocks. Common fields like event, id, and data are automatically extracted from the payload when present." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering execution. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the execution logs for error details." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering a run. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the run logs for error details." },
|
||||
]} />
|
||||
|
||||
@@ -21,7 +21,17 @@ Usa tus propias claves API para proveedores de modelos de IA en lugar de las cla
|
||||
| OpenAI | Embeddings de base de conocimiento, bloque Agent |
|
||||
| Anthropic | Bloque Agent |
|
||||
| Google | Bloque Agent |
|
||||
| Mistral | OCR de base de conocimiento |
|
||||
| Mistral | OCR de base de conocimiento, bloque Agent |
|
||||
| Fireworks | Bloque Agent |
|
||||
| Firecrawl | Web scraping, crawling, búsqueda y extracción |
|
||||
| Exa | Búsqueda e investigación impulsada por IA |
|
||||
| Serper | API de búsqueda de Google |
|
||||
| Linkup | Búsqueda web y recuperación de contenido |
|
||||
| Parallel AI | Búsqueda web, extracción e investigación profunda |
|
||||
| Perplexity | Chat y búsqueda web impulsada por IA |
|
||||
| Jina AI | Lectura y búsqueda web |
|
||||
| Google Cloud | APIs de Translate, Maps, PageSpeed y Books |
|
||||
| Brandfetch | Activos de marca, logos, colores e información de empresas |
|
||||
|
||||
### Configuración
|
||||
|
||||
|
||||
@@ -105,9 +105,108 @@ El desglose del modelo muestra:
|
||||
Los precios mostrados reflejan las tarifas a partir del 10 de septiembre de 2025. Consulta la documentación del proveedor para conocer los precios actuales.
|
||||
</Callout>
|
||||
|
||||
## Precios de herramientas alojadas
|
||||
|
||||
Cuando los flujos de trabajo usan bloques de herramientas con las claves API alojadas de Sim, los costos se cobran por operación. Usa tus propias claves a través de BYOK para pagar directamente a los proveedores.
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - Web scraping, crawling, búsqueda y extracción
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - Búsqueda e investigación impulsada por IA
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - API de búsqueda de Google
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - Chat y búsqueda web impulsada por IA
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - Búsqueda web y recuperación de contenido
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - Búsqueda web, extracción e investigación profunda
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - Lectura y búsqueda web
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - APIs de Translate, Maps, PageSpeed y Books
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - Activos de marca, logos, colores e información de empresas
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Trae tu propia clave (BYOK)
|
||||
|
||||
Puedes usar tus propias claves API para modelos alojados (OpenAI, Anthropic, Google, Mistral) en **Configuración → BYOK** para pagar precios base. Las claves están encriptadas y se aplican a todo el espacio de trabajo.
|
||||
Puedes usar tus propias claves API para proveedores compatibles (OpenAI, Anthropic, Google, Mistral, Fireworks, Firecrawl, Exa, Serper, Linkup, Parallel AI, Perplexity, Jina AI, Google Cloud, Brandfetch) en **Configuración → BYOK** para pagar precios base. Las claves están encriptadas y se aplican a todo el espacio de trabajo.
|
||||
|
||||
## Estrategias de optimización de costos
|
||||
|
||||
|
||||
@@ -21,7 +21,17 @@ Utilisez vos propres clés API pour les fournisseurs de modèles IA au lieu des
|
||||
| OpenAI | Embeddings de base de connaissances, bloc Agent |
|
||||
| Anthropic | Bloc Agent |
|
||||
| Google | Bloc Agent |
|
||||
| Mistral | OCR de base de connaissances |
|
||||
| Mistral | OCR de base de connaissances, bloc Agent |
|
||||
| Fireworks | Bloc Agent |
|
||||
| Firecrawl | Web scraping, crawling, recherche et extraction |
|
||||
| Exa | Recherche et investigation alimentées par l'IA |
|
||||
| Serper | API de recherche Google |
|
||||
| Linkup | Recherche web et récupération de contenu |
|
||||
| Parallel AI | Recherche web, extraction et recherche approfondie |
|
||||
| Perplexity | Chat et recherche web alimentés par l'IA |
|
||||
| Jina AI | Lecture et recherche web |
|
||||
| Google Cloud | APIs Translate, Maps, PageSpeed et Books |
|
||||
| Brandfetch | Ressources de marque, logos, couleurs et informations d'entreprise |
|
||||
|
||||
### Configuration
|
||||
|
||||
|
||||
@@ -105,9 +105,108 @@ La répartition des modèles montre :
|
||||
Les prix indiqués reflètent les tarifs en date du 10 septembre 2025. Consultez la documentation des fournisseurs pour les tarifs actuels.
|
||||
</Callout>
|
||||
|
||||
## Tarification des outils hébergés
|
||||
|
||||
Lorsque les workflows utilisent des blocs d'outils avec les clés API hébergées par Sim, les coûts sont facturés par opération. Utilisez vos propres clés via BYOK pour payer directement les fournisseurs.
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - Web scraping, crawling, recherche et extraction
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - Recherche et investigation alimentées par l'IA
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - API de recherche Google
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - Chat et recherche web alimentés par l'IA
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - Recherche web et récupération de contenu
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - Recherche web, extraction et recherche approfondie
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - Lecture et recherche web
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - APIs Translate, Maps, PageSpeed et Books
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - Ressources de marque, logos, couleurs et informations d'entreprise
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Apportez votre propre clé (BYOK)
|
||||
|
||||
Vous pouvez utiliser vos propres clés API pour les modèles hébergés (OpenAI, Anthropic, Google, Mistral) dans **Paramètres → BYOK** pour payer les prix de base. Les clés sont chiffrées et s'appliquent à l'ensemble de l'espace de travail.
|
||||
Vous pouvez utiliser vos propres clés API pour les fournisseurs pris en charge (OpenAI, Anthropic, Google, Mistral, Fireworks, Firecrawl, Exa, Serper, Linkup, Parallel AI, Perplexity, Jina AI, Google Cloud, Brandfetch) dans **Paramètres → BYOK** pour payer les prix de base. Les clés sont chiffrées et s'appliquent à l'ensemble de l'espace de travail.
|
||||
|
||||
## Stratégies d'optimisation des coûts
|
||||
|
||||
|
||||
@@ -20,7 +20,17 @@ Simのホストキーの代わりに、AIモデルプロバイダー用の独自
|
||||
| OpenAI | ナレッジベースの埋め込み、エージェントブロック |
|
||||
| Anthropic | エージェントブロック |
|
||||
| Google | エージェントブロック |
|
||||
| Mistral | ナレッジベースOCR |
|
||||
| Mistral | ナレッジベースOCR、エージェントブロック |
|
||||
| Fireworks | エージェントブロック |
|
||||
| Firecrawl | Webスクレイピング、クローリング、検索、抽出 |
|
||||
| Exa | AI搭載の検索とリサーチ |
|
||||
| Serper | Google検索API |
|
||||
| Linkup | Web検索とコンテンツ取得 |
|
||||
| Parallel AI | Web検索、抽出、ディープリサーチ |
|
||||
| Perplexity | AI搭載のチャットとWeb検索 |
|
||||
| Jina AI | Web閲覧と検索 |
|
||||
| Google Cloud | Translate、Maps、PageSpeed、Books API |
|
||||
| Brandfetch | ブランドアセット、ロゴ、カラー、企業情報 |
|
||||
|
||||
### セットアップ
|
||||
|
||||
|
||||
@@ -105,9 +105,108 @@ AIブロックを使用するワークフローでは、ログで詳細なコス
|
||||
表示価格は2025年9月10日時点のレートを反映しています。最新の価格については各プロバイダーのドキュメントをご確認ください。
|
||||
</Callout>
|
||||
|
||||
## ホスティングツールの料金
|
||||
|
||||
ワークフローがSimのホスティングAPIキーを使用するツールブロックを利用する場合、操作ごとに料金が発生します。BYOKで独自のキーを使用すると、プロバイダーに直接支払うことができます。
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - Webスクレイピング、クローリング、検索、抽出
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - AI搭載の検索とリサーチ
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - Google検索API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - AI搭載のチャットとWeb検索
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - Web検索とコンテンツ取得
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - Web検索、抽出、ディープリサーチ
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - Web閲覧と検索
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - Translate、Maps、PageSpeed、Books API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - ブランドアセット、ロゴ、カラー、企業情報
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Bring Your Own Key (BYOK)
|
||||
|
||||
ホストされたモデル(OpenAI、Anthropic、Google、Mistral)に対して、**設定 → BYOK**で独自のAPIキーを使用し、基本価格で支払うことができます。キーは暗号化され、ワークスペース全体に適用されます。
|
||||
対応プロバイダー(OpenAI、Anthropic、Google、Mistral、Fireworks、Firecrawl、Exa、Serper、Linkup、Parallel AI、Perplexity、Jina AI、Google Cloud、Brandfetch)に対して、**設定 → BYOK**で独自のAPIキーを使用し、基本価格で支払うことができます。キーは暗号化され、ワークスペース全体に適用されます。
|
||||
|
||||
## コスト最適化戦略
|
||||
|
||||
|
||||
@@ -20,7 +20,17 @@ Sim 企业版为需要更高安全性、合规性和管理能力的组织提供
|
||||
| OpenAI | 知识库嵌入、Agent 模块 |
|
||||
| Anthropic | Agent 模块 |
|
||||
| Google | Agent 模块 |
|
||||
| Mistral | 知识库 OCR |
|
||||
| Mistral | 知识库 OCR、Agent 模块 |
|
||||
| Fireworks | Agent 模块 |
|
||||
| Firecrawl | 网页抓取、爬取、搜索和提取 |
|
||||
| Exa | AI 驱动的搜索和研究 |
|
||||
| Serper | Google 搜索 API |
|
||||
| Linkup | 网络搜索和内容检索 |
|
||||
| Parallel AI | 网络搜索、提取和深度研究 |
|
||||
| Perplexity | AI 驱动的聊天和网络搜索 |
|
||||
| Jina AI | 网页阅读和搜索 |
|
||||
| Google Cloud | Translate、Maps、PageSpeed 和 Books API |
|
||||
| Brandfetch | 品牌资产、标志、颜色和公司信息 |
|
||||
|
||||
### 配置方法
|
||||
|
||||
|
||||
@@ -105,9 +105,108 @@ totalCost = baseExecutionCharge + modelCost
|
||||
显示的价格为截至 2025 年 9 月 10 日的费率。请查看提供商文档以获取最新价格。
|
||||
</Callout>
|
||||
|
||||
## 托管工具定价
|
||||
|
||||
当工作流使用 Sim 托管 API 密钥的工具模块时,费用按操作收取。通过 BYOK 使用你自己的密钥可直接向服务商付费。
|
||||
|
||||
<Tabs items={['Firecrawl', 'Exa', 'Serper', 'Perplexity', 'Linkup', 'Parallel AI', 'Jina AI', 'Google Cloud', 'Brandfetch']}>
|
||||
<Tab>
|
||||
**Firecrawl** - 网页抓取、爬取、搜索和提取
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Scrape | $0.001 per credit used |
|
||||
| Crawl | $0.001 per credit used |
|
||||
| Search | $0.001 per credit used |
|
||||
| Extract | $0.001 per credit used |
|
||||
| Map | $0.001 per credit used |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Exa** - AI 驱动的搜索和研究
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Dynamic (returned by API) |
|
||||
| Get Contents | Dynamic (returned by API) |
|
||||
| Find Similar Links | Dynamic (returned by API) |
|
||||
| Answer | Dynamic (returned by API) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Serper** - Google 搜索 API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.001 |
|
||||
| Search (>10 results) | $0.002 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Perplexity** - AI 驱动的聊天和网络搜索
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | $0.005 per request |
|
||||
| Chat | Token-based (varies by model) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Linkup** - 网络搜索和内容检索
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Standard search | ~$0.006 |
|
||||
| Deep search | ~$0.055 |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Parallel AI** - 网络搜索、提取和深度研究
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search (≤10 results) | $0.005 |
|
||||
| Search (>10 results) | $0.005 + $0.001 per additional result |
|
||||
| Extract | $0.001 per URL |
|
||||
| Deep Research | $0.005–$2.40 (varies by processor tier) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Jina AI** - 网页阅读和搜索
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Read URL | $0.20 per 1M tokens |
|
||||
| Search | $0.20 per 1M tokens (minimum 10K tokens) |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Google Cloud** - Translate、Maps、PageSpeed 和 Books API
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Translate / Detect | $0.00002 per character |
|
||||
| Maps (Geocode, Directions, Distance Matrix, Elevation, Timezone, Reverse Geocode, Geolocate, Validate Address) | $0.005 per request |
|
||||
| Maps (Snap to Roads) | $0.01 per request |
|
||||
| Maps (Place Details) | $0.017 per request |
|
||||
| Maps (Places Search) | $0.032 per request |
|
||||
| PageSpeed | Free |
|
||||
| Books (Search, Details) | Free |
|
||||
</Tab>
|
||||
|
||||
<Tab>
|
||||
**Brandfetch** - 品牌资产、标志、颜色和公司信息
|
||||
|
||||
| Operation | Cost |
|
||||
|-----------|------|
|
||||
| Search | Free |
|
||||
| Get Brand | $0.04 per request |
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## 自带密钥(BYOK)
|
||||
|
||||
你可以在 **设置 → BYOK** 中为托管模型(OpenAI、Anthropic、Google、Mistral)使用你自己的 API 密钥,以按基础价格计费。密钥会被加密,并在整个工作区范围内生效。
|
||||
你可以在 **设置 → BYOK** 中为支持的服务商(OpenAI、Anthropic、Google、Mistral、Fireworks、Firecrawl、Exa、Serper、Linkup、Parallel AI、Perplexity、Jina AI、Google Cloud、Brandfetch)使用你自己的 API 密钥,以按基础价格计费。密钥会被加密,并在整个工作区范围内生效。
|
||||
|
||||
## 成本优化策略
|
||||
|
||||
|
||||
1
apps/docs/lib/urls.ts
Normal file
1
apps/docs/lib/urls.ts
Normal file
@@ -0,0 +1 @@
|
||||
export const DOCS_BASE_URL = process.env.NEXT_PUBLIC_DOCS_URL ?? 'https://docs.sim.ai'
|
||||
@@ -25,6 +25,10 @@
|
||||
"name": "Workflows",
|
||||
"description": "Execute workflows and manage workflow resources"
|
||||
},
|
||||
{
|
||||
"name": "Human in the Loop",
|
||||
"description": "Manage paused workflow executions and resume them with input"
|
||||
},
|
||||
{
|
||||
"name": "Logs",
|
||||
"description": "Query execution logs and retrieve details"
|
||||
@@ -235,6 +239,544 @@
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/workflows/{id}/paused": {
|
||||
"get": {
|
||||
"operationId": "listPausedExecutions",
|
||||
"summary": "List Paused Executions",
|
||||
"description": "List all paused executions for a workflow. Workflows pause at Human in the Loop blocks and wait for input before continuing. Use this endpoint to discover which executions need attention.",
|
||||
"tags": ["Human in the Loop"],
|
||||
"x-codeSamples": [
|
||||
{
|
||||
"id": "curl",
|
||||
"label": "cURL",
|
||||
"lang": "bash",
|
||||
"source": "curl -X GET \\\n \"https://www.sim.ai/api/workflows/{id}/paused?status=paused\" \\\n -H \"X-API-Key: YOUR_API_KEY\""
|
||||
}
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The unique identifier of the workflow.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "wf_1a2b3c4d5e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "status",
|
||||
"in": "query",
|
||||
"required": false,
|
||||
"description": "Filter paused executions by status.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "paused"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "List of paused executions.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"pausedExecutions": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/PausedExecutionSummary"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"pausedExecutions": [
|
||||
{
|
||||
"id": "pe_abc123",
|
||||
"workflowId": "wf_1a2b3c4d5e",
|
||||
"executionId": "exec_9f8e7d6c5b",
|
||||
"status": "paused",
|
||||
"totalPauseCount": 1,
|
||||
"resumedCount": 0,
|
||||
"pausedAt": "2026-01-15T10:30:00Z",
|
||||
"updatedAt": "2026-01-15T10:30:00Z",
|
||||
"expiresAt": null,
|
||||
"metadata": null,
|
||||
"triggerIds": [],
|
||||
"pausePoints": [
|
||||
{
|
||||
"contextId": "ctx_xyz789",
|
||||
"blockId": "block_hitl_1",
|
||||
"registeredAt": "2026-01-15T10:30:00Z",
|
||||
"resumeStatus": "paused",
|
||||
"snapshotReady": true,
|
||||
"resumeLinks": {
|
||||
"apiUrl": "https://www.sim.ai/api/resume/wf_1a2b3c4d5e/exec_9f8e7d6c5b/ctx_xyz789",
|
||||
"uiUrl": "https://www.sim.ai/resume/wf_1a2b3c4d5e/exec_9f8e7d6c5b",
|
||||
"contextId": "ctx_xyz789",
|
||||
"executionId": "exec_9f8e7d6c5b",
|
||||
"workflowId": "wf_1a2b3c4d5e"
|
||||
},
|
||||
"response": {
|
||||
"displayData": {
|
||||
"title": "Approval Required",
|
||||
"message": "Please review this request"
|
||||
},
|
||||
"formFields": []
|
||||
}
|
||||
}
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/BadRequest"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/Unauthorized"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/Forbidden"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/NotFound"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/workflows/{id}/paused/{executionId}": {
|
||||
"get": {
|
||||
"operationId": "getPausedExecution",
|
||||
"summary": "Get Paused Execution",
|
||||
"description": "Get detailed information about a specific paused execution, including its pause points, execution snapshot, and resume queue. Use this to inspect the state before resuming.",
|
||||
"tags": ["Human in the Loop"],
|
||||
"x-codeSamples": [
|
||||
{
|
||||
"id": "curl",
|
||||
"label": "cURL",
|
||||
"lang": "bash",
|
||||
"source": "curl -X GET \\\n \"https://www.sim.ai/api/workflows/{id}/paused/{executionId}\" \\\n -H \"X-API-Key: YOUR_API_KEY\""
|
||||
}
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "id",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The unique identifier of the workflow.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "wf_1a2b3c4d5e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "executionId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The execution ID of the paused execution.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "exec_9f8e7d6c5b"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Paused execution details.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/PausedExecutionDetail"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/Unauthorized"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/Forbidden"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/NotFound"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/resume/{workflowId}/{executionId}": {
|
||||
"get": {
|
||||
"operationId": "getPausedExecutionByResumePath",
|
||||
"summary": "Get Paused Execution (Resume Path)",
|
||||
"description": "Get detailed information about a specific paused execution using the resume URL path. Returns the same data as the workflow paused execution detail endpoint.",
|
||||
"tags": ["Human in the Loop"],
|
||||
"x-codeSamples": [
|
||||
{
|
||||
"id": "curl",
|
||||
"label": "cURL",
|
||||
"lang": "bash",
|
||||
"source": "curl -X GET \\\n \"https://www.sim.ai/api/resume/{workflowId}/{executionId}\" \\\n -H \"X-API-Key: YOUR_API_KEY\""
|
||||
}
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "workflowId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The unique identifier of the workflow.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "wf_1a2b3c4d5e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "executionId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The execution ID of the paused execution.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "exec_9f8e7d6c5b"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Paused execution details.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/PausedExecutionDetail"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/Unauthorized"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/Forbidden"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/NotFound"
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal server error.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Human-readable error message."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/resume/{workflowId}/{executionId}/{contextId}": {
|
||||
"get": {
|
||||
"operationId": "getPauseContext",
|
||||
"summary": "Get Pause Context",
|
||||
"description": "Get detailed information about a specific pause context within a paused execution. Returns the pause point details, resume queue state, and any active resume entry.",
|
||||
"tags": ["Human in the Loop"],
|
||||
"x-codeSamples": [
|
||||
{
|
||||
"id": "curl",
|
||||
"label": "cURL",
|
||||
"lang": "bash",
|
||||
"source": "curl -X GET \\\n \"https://www.sim.ai/api/resume/{workflowId}/{executionId}/{contextId}\" \\\n -H \"X-API-Key: YOUR_API_KEY\""
|
||||
}
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "workflowId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The unique identifier of the workflow.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "wf_1a2b3c4d5e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "executionId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The execution ID of the paused execution.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "exec_9f8e7d6c5b"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "contextId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The pause context ID to retrieve details for.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "ctx_xyz789"
|
||||
}
|
||||
}
|
||||
],
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Pause context details.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/PauseContextDetail"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/Unauthorized"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/Forbidden"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/NotFound"
|
||||
}
|
||||
}
|
||||
},
|
||||
"post": {
|
||||
"operationId": "resumeExecution",
|
||||
"summary": "Resume Execution",
|
||||
"description": "Resume a paused workflow execution by providing input for a specific pause context. The execution continues from where it paused, using the provided input. Supports synchronous, asynchronous, and streaming modes (determined by the original execution's configuration).",
|
||||
"tags": ["Human in the Loop"],
|
||||
"x-codeSamples": [
|
||||
{
|
||||
"id": "curl",
|
||||
"label": "cURL",
|
||||
"lang": "bash",
|
||||
"source": "curl -X POST \\\n \"https://www.sim.ai/api/resume/{workflowId}/{executionId}/{contextId}\" \\\n -H \"X-API-Key: YOUR_API_KEY\" \\\n -H \"Content-Type: application/json\" \\\n -d '{\n \"input\": {\n \"approved\": true,\n \"comment\": \"Looks good to me\"\n }\n }'"
|
||||
}
|
||||
],
|
||||
"parameters": [
|
||||
{
|
||||
"name": "workflowId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The unique identifier of the workflow.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "wf_1a2b3c4d5e"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "executionId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The execution ID of the paused execution.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "exec_9f8e7d6c5b"
|
||||
}
|
||||
},
|
||||
{
|
||||
"name": "contextId",
|
||||
"in": "path",
|
||||
"required": true,
|
||||
"description": "The pause context ID to resume. Found in the pause point's contextId field or resumeLinks.",
|
||||
"schema": {
|
||||
"type": "string",
|
||||
"example": "ctx_xyz789"
|
||||
}
|
||||
}
|
||||
],
|
||||
"requestBody": {
|
||||
"description": "Input data for the resumed execution. The structure depends on the workflow's Human in the Loop block configuration.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"input": {
|
||||
"type": "object",
|
||||
"description": "Key-value pairs to pass as input to the resumed execution. If omitted, the entire request body is used as input.",
|
||||
"additionalProperties": true
|
||||
}
|
||||
}
|
||||
},
|
||||
"example": {
|
||||
"input": {
|
||||
"approved": true,
|
||||
"comment": "Looks good to me"
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"responses": {
|
||||
"200": {
|
||||
"description": "Resume execution completed synchronously, or resume was queued behind another in-progress resume.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"oneOf": [
|
||||
{
|
||||
"$ref": "#/components/schemas/ResumeResult"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"description": "Resume has been queued behind another in-progress resume.",
|
||||
"properties": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": ["queued"],
|
||||
"description": "Indicates the resume is queued."
|
||||
},
|
||||
"executionId": {
|
||||
"type": "string",
|
||||
"description": "The execution ID assigned to this resume."
|
||||
},
|
||||
"queuePosition": {
|
||||
"type": "integer",
|
||||
"description": "Position in the resume queue."
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"description": "Human-readable status message."
|
||||
}
|
||||
}
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"description": "Resume execution started (non-API-key callers). The execution runs asynchronously.",
|
||||
"properties": {
|
||||
"status": {
|
||||
"type": "string",
|
||||
"enum": ["started"],
|
||||
"description": "Indicates the resume execution has started."
|
||||
},
|
||||
"executionId": {
|
||||
"type": "string",
|
||||
"description": "The execution ID for the resumed workflow."
|
||||
},
|
||||
"message": {
|
||||
"type": "string",
|
||||
"description": "Human-readable status message."
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"examples": {
|
||||
"sync": {
|
||||
"summary": "Synchronous completion",
|
||||
"value": {
|
||||
"success": true,
|
||||
"status": "completed",
|
||||
"executionId": "exec_new123",
|
||||
"output": {
|
||||
"result": "Approved and processed"
|
||||
},
|
||||
"error": null,
|
||||
"metadata": {
|
||||
"duration": 850,
|
||||
"startTime": "2026-01-15T10:35:00Z",
|
||||
"endTime": "2026-01-15T10:35:01Z"
|
||||
}
|
||||
}
|
||||
},
|
||||
"queued": {
|
||||
"summary": "Queued behind another resume",
|
||||
"value": {
|
||||
"status": "queued",
|
||||
"executionId": "exec_new123",
|
||||
"queuePosition": 2,
|
||||
"message": "Resume queued. It will run after current resumes finish."
|
||||
}
|
||||
},
|
||||
"started": {
|
||||
"summary": "Execution started (fire and forget)",
|
||||
"value": {
|
||||
"status": "started",
|
||||
"executionId": "exec_new123",
|
||||
"message": "Resume execution started."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"202": {
|
||||
"description": "Resume execution has been queued for asynchronous processing. Poll the statusUrl for results.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"$ref": "#/components/schemas/AsyncExecutionResult"
|
||||
},
|
||||
"example": {
|
||||
"success": true,
|
||||
"async": true,
|
||||
"jobId": "job_4a3b2c1d0e",
|
||||
"executionId": "exec_new123",
|
||||
"message": "Resume execution queued",
|
||||
"statusUrl": "https://www.sim.ai/api/jobs/job_4a3b2c1d0e"
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"400": {
|
||||
"$ref": "#/components/responses/BadRequest"
|
||||
},
|
||||
"401": {
|
||||
"$ref": "#/components/responses/Unauthorized"
|
||||
},
|
||||
"403": {
|
||||
"$ref": "#/components/responses/Forbidden"
|
||||
},
|
||||
"404": {
|
||||
"$ref": "#/components/responses/NotFound"
|
||||
},
|
||||
"503": {
|
||||
"description": "Failed to queue the resume execution. Retry the request.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Error message."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"500": {
|
||||
"description": "Internal server error.",
|
||||
"content": {
|
||||
"application/json": {
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"error": {
|
||||
"type": "string",
|
||||
"description": "Human-readable error message."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"/api/v1/workflows": {
|
||||
"get": {
|
||||
"operationId": "listWorkflows",
|
||||
@@ -5788,6 +6330,346 @@
|
||||
"description": "Upper bound value for 'between' operator."
|
||||
}
|
||||
}
|
||||
},
|
||||
"PausedExecutionSummary": {
|
||||
"type": "object",
|
||||
"description": "Summary of a paused workflow execution.",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier for the paused execution record."
|
||||
},
|
||||
"workflowId": {
|
||||
"type": "string",
|
||||
"description": "The workflow this execution belongs to."
|
||||
},
|
||||
"executionId": {
|
||||
"type": "string",
|
||||
"description": "The execution that was paused."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Current status of the paused execution.",
|
||||
"example": "paused"
|
||||
},
|
||||
"totalPauseCount": {
|
||||
"type": "integer",
|
||||
"description": "Total number of pause points in this execution."
|
||||
},
|
||||
"resumedCount": {
|
||||
"type": "integer",
|
||||
"description": "Number of pause points that have been resumed."
|
||||
},
|
||||
"pausedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When the execution was paused."
|
||||
},
|
||||
"updatedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When the paused execution record was last updated."
|
||||
},
|
||||
"expiresAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When the paused execution will expire and be cleaned up."
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"nullable": true,
|
||||
"description": "Additional metadata associated with the paused execution.",
|
||||
"additionalProperties": true
|
||||
},
|
||||
"triggerIds": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "IDs of triggers that initiated the original execution."
|
||||
},
|
||||
"pausePoints": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/PausePoint"
|
||||
},
|
||||
"description": "List of pause points in the execution."
|
||||
}
|
||||
}
|
||||
},
|
||||
"PausePoint": {
|
||||
"type": "object",
|
||||
"description": "A point in the workflow where execution has been paused awaiting human input.",
|
||||
"properties": {
|
||||
"contextId": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier for this pause context. Used when resuming execution."
|
||||
},
|
||||
"blockId": {
|
||||
"type": "string",
|
||||
"description": "The block ID where execution paused."
|
||||
},
|
||||
"response": {
|
||||
"description": "Data returned by the block before pausing, including display data and form fields."
|
||||
},
|
||||
"registeredAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When this pause point was registered."
|
||||
},
|
||||
"resumeStatus": {
|
||||
"type": "string",
|
||||
"enum": ["paused", "resumed", "failed", "queued", "resuming"],
|
||||
"description": "Current status of this pause point."
|
||||
},
|
||||
"snapshotReady": {
|
||||
"type": "boolean",
|
||||
"description": "Whether the execution snapshot is ready for resumption."
|
||||
},
|
||||
"resumeLinks": {
|
||||
"type": "object",
|
||||
"description": "Links for resuming this pause point.",
|
||||
"properties": {
|
||||
"apiUrl": {
|
||||
"type": "string",
|
||||
"format": "uri",
|
||||
"description": "API endpoint URL to POST resume input to."
|
||||
},
|
||||
"uiUrl": {
|
||||
"type": "string",
|
||||
"format": "uri",
|
||||
"description": "UI URL for a human to review and approve."
|
||||
},
|
||||
"contextId": {
|
||||
"type": "string",
|
||||
"description": "The context ID for this pause point."
|
||||
},
|
||||
"executionId": {
|
||||
"type": "string",
|
||||
"description": "The execution ID."
|
||||
},
|
||||
"workflowId": {
|
||||
"type": "string",
|
||||
"description": "The workflow ID."
|
||||
}
|
||||
}
|
||||
},
|
||||
"queuePosition": {
|
||||
"type": "integer",
|
||||
"nullable": true,
|
||||
"description": "Position in the resume queue, if queued."
|
||||
},
|
||||
"latestResumeEntry": {
|
||||
"$ref": "#/components/schemas/ResumeQueueEntry",
|
||||
"nullable": true,
|
||||
"description": "The most recent resume queue entry for this pause point."
|
||||
},
|
||||
"parallelScope": {
|
||||
"type": "object",
|
||||
"description": "Scope information when the pause occurs inside a parallel branch.",
|
||||
"properties": {
|
||||
"parallelId": {
|
||||
"type": "string",
|
||||
"description": "Identifier of the parallel execution group."
|
||||
},
|
||||
"branchIndex": {
|
||||
"type": "integer",
|
||||
"description": "Index of the branch within the parallel group."
|
||||
},
|
||||
"branchTotal": {
|
||||
"type": "integer",
|
||||
"description": "Total number of branches in the parallel group."
|
||||
}
|
||||
}
|
||||
},
|
||||
"loopScope": {
|
||||
"type": "object",
|
||||
"description": "Scope information when the pause occurs inside a loop.",
|
||||
"properties": {
|
||||
"loopId": {
|
||||
"type": "string",
|
||||
"description": "Identifier of the loop."
|
||||
},
|
||||
"iteration": {
|
||||
"type": "integer",
|
||||
"description": "Current loop iteration number."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"ResumeQueueEntry": {
|
||||
"type": "object",
|
||||
"description": "An entry in the resume execution queue.",
|
||||
"properties": {
|
||||
"id": {
|
||||
"type": "string",
|
||||
"description": "Unique identifier for this queue entry."
|
||||
},
|
||||
"pausedExecutionId": {
|
||||
"type": "string",
|
||||
"description": "The paused execution this entry belongs to."
|
||||
},
|
||||
"parentExecutionId": {
|
||||
"type": "string",
|
||||
"description": "The original execution that was paused."
|
||||
},
|
||||
"newExecutionId": {
|
||||
"type": "string",
|
||||
"description": "The new execution ID created for the resume."
|
||||
},
|
||||
"contextId": {
|
||||
"type": "string",
|
||||
"description": "The pause context ID being resumed."
|
||||
},
|
||||
"resumeInput": {
|
||||
"description": "The input provided when resuming."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Status of this queue entry (e.g., pending, claimed, completed, failed)."
|
||||
},
|
||||
"queuedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When the entry was added to the queue."
|
||||
},
|
||||
"claimedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When execution started processing this entry."
|
||||
},
|
||||
"completedAt": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"nullable": true,
|
||||
"description": "When execution completed."
|
||||
},
|
||||
"failureReason": {
|
||||
"type": "string",
|
||||
"nullable": true,
|
||||
"description": "Reason for failure, if the resume failed."
|
||||
}
|
||||
}
|
||||
},
|
||||
"PausedExecutionDetail": {
|
||||
"type": "object",
|
||||
"description": "Detailed information about a paused execution, including the execution snapshot and resume queue.",
|
||||
"allOf": [
|
||||
{
|
||||
"$ref": "#/components/schemas/PausedExecutionSummary"
|
||||
},
|
||||
{
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"executionSnapshot": {
|
||||
"type": "object",
|
||||
"description": "Serialized execution state for resumption.",
|
||||
"properties": {
|
||||
"snapshot": {
|
||||
"type": "string",
|
||||
"description": "Serialized execution snapshot data."
|
||||
},
|
||||
"triggerIds": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"type": "string"
|
||||
},
|
||||
"description": "Trigger IDs from the snapshot."
|
||||
}
|
||||
}
|
||||
},
|
||||
"queue": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/ResumeQueueEntry"
|
||||
},
|
||||
"description": "Resume queue entries for this execution."
|
||||
}
|
||||
}
|
||||
}
|
||||
]
|
||||
},
|
||||
"PauseContextDetail": {
|
||||
"type": "object",
|
||||
"description": "Detailed information about a specific pause context within a paused execution.",
|
||||
"properties": {
|
||||
"execution": {
|
||||
"$ref": "#/components/schemas/PausedExecutionSummary",
|
||||
"description": "Summary of the parent paused execution."
|
||||
},
|
||||
"pausePoint": {
|
||||
"$ref": "#/components/schemas/PausePoint",
|
||||
"description": "The specific pause point for this context."
|
||||
},
|
||||
"queue": {
|
||||
"type": "array",
|
||||
"items": {
|
||||
"$ref": "#/components/schemas/ResumeQueueEntry"
|
||||
},
|
||||
"description": "Resume queue entries for this context."
|
||||
},
|
||||
"activeResumeEntry": {
|
||||
"$ref": "#/components/schemas/ResumeQueueEntry",
|
||||
"nullable": true,
|
||||
"description": "The currently active resume entry, if any."
|
||||
}
|
||||
}
|
||||
},
|
||||
"ResumeResult": {
|
||||
"type": "object",
|
||||
"description": "Result of a synchronous resume execution.",
|
||||
"properties": {
|
||||
"success": {
|
||||
"type": "boolean",
|
||||
"description": "Whether the resume execution completed successfully."
|
||||
},
|
||||
"status": {
|
||||
"type": "string",
|
||||
"description": "Execution status.",
|
||||
"enum": ["completed", "failed", "paused", "cancelled"],
|
||||
"example": "completed"
|
||||
},
|
||||
"executionId": {
|
||||
"type": "string",
|
||||
"description": "The new execution ID for the resumed workflow."
|
||||
},
|
||||
"output": {
|
||||
"type": "object",
|
||||
"description": "Workflow output from the resumed execution.",
|
||||
"additionalProperties": true
|
||||
},
|
||||
"error": {
|
||||
"type": "string",
|
||||
"nullable": true,
|
||||
"description": "Error message if the execution failed."
|
||||
},
|
||||
"metadata": {
|
||||
"type": "object",
|
||||
"description": "Execution timing metadata.",
|
||||
"properties": {
|
||||
"duration": {
|
||||
"type": "integer",
|
||||
"description": "Total execution duration in milliseconds."
|
||||
},
|
||||
"startTime": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When the resume execution started."
|
||||
},
|
||||
"endTime": {
|
||||
"type": "string",
|
||||
"format": "date-time",
|
||||
"description": "When the resume execution completed."
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
},
|
||||
"responses": {
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
{
|
||||
"name": "Sim Documentation — Build AI Agents & Run Your Agentic Workforce",
|
||||
"name": "Sim Documentation — The AI Workspace for Teams",
|
||||
"short_name": "Sim Docs",
|
||||
"description": "Documentation for Sim — the open-source platform to build AI agents and run your agentic workforce. Connect 1,000+ integrations and LLMs to deploy and orchestrate agentic workflows.",
|
||||
"description": "Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.",
|
||||
"start_url": "/",
|
||||
"scope": "/",
|
||||
"icons": [
|
||||
|
||||
@@ -1,13 +1,15 @@
|
||||
# Sim Documentation
|
||||
|
||||
Sim is a visual workflow builder for AI applications that lets you build AI agent workflows visually. Create powerful AI agents, automation workflows, and data processing pipelines by connecting blocks on a canvas—no coding required.
|
||||
Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Create agents visually with the workflow builder, conversationally through Mothership, or programmatically with the API — connected to 1,000+ integrations and every major LLM.
|
||||
|
||||
## What is Sim?
|
||||
|
||||
Sim provides a complete ecosystem for AI workflow automation including:
|
||||
Sim provides a complete AI workspace including:
|
||||
- Mothership — natural language agent creation and workspace management
|
||||
- Visual workflow builder with drag-and-drop interface
|
||||
- AI agent creation and automation
|
||||
- 80+ built-in integrations (OpenAI, Slack, Gmail, GitHub, etc.)
|
||||
- 1,000+ built-in integrations (OpenAI, Anthropic, Slack, Gmail, GitHub, etc.)
|
||||
- Knowledge bases for retrieval-augmented generation
|
||||
- Built-in tables for structured data
|
||||
- Real-time team collaboration
|
||||
- Multiple deployment options (cloud-hosted or self-hosted)
|
||||
- Custom integrations via MCP protocol
|
||||
@@ -16,22 +18,22 @@ Sim provides a complete ecosystem for AI workflow automation including:
|
||||
|
||||
Here are the key areas covered in our documentation:
|
||||
|
||||
/introduction - Getting started with Sim visual workflow builder
|
||||
/getting-started - Quick start guide for building your first workflow
|
||||
/blocks - Understanding workflow blocks (AI agents, APIs, functions)
|
||||
/tools - 80+ built-in integrations and tools
|
||||
/introduction - Getting started with Sim AI workspace
|
||||
/getting-started - Quick start guide for building your first agent
|
||||
/blocks - Understanding blocks (AI agents, APIs, functions)
|
||||
/tools - 1,000+ integrations and tools
|
||||
/webhooks - Webhook triggers and handling
|
||||
/mcp - Custom integrations via MCP protocol
|
||||
/deployment - Cloud-hosted vs self-hosted deployment
|
||||
/permissions - Team collaboration and workspace management
|
||||
/collaboration - Real-time editing and team features
|
||||
/workflows - Building complex automation workflows
|
||||
/workflows - Building agent logic with the visual builder
|
||||
|
||||
## Technical Information
|
||||
|
||||
- Framework: Fumadocs (Next.js-based documentation platform)
|
||||
- Content: MDX files with interactive examples
|
||||
- Languages: English (primary), French, Chinese
|
||||
- Languages: English (primary), Spanish, French, German, Japanese, Chinese
|
||||
- Search: AI-powered search and assistance available
|
||||
|
||||
## Complete Documentation
|
||||
@@ -40,14 +42,10 @@ For the full documentation with all pages, examples, and interactive features, v
|
||||
|
||||
## Additional Resources
|
||||
|
||||
- GitHub repository with workflow examples
|
||||
- GitHub repository with agent examples
|
||||
- Discord community for support and discussions
|
||||
- 80+ built-in integrations with detailed guides
|
||||
- 1,000+ built-in integrations with detailed guides
|
||||
- MCP protocol documentation for custom integrations
|
||||
- Self-hosting guides and Docker deployment
|
||||
|
||||
For the complete documentation with interactive examples and visual workflow builder guides, visit https://docs.sim.ai
|
||||
|
||||
---
|
||||
|
||||
Last updated: 2025-09-15
|
||||
For the complete documentation visit https://docs.sim.ai
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use client'
|
||||
|
||||
import { useRef, useState } from 'react'
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { Eye, EyeOff, Loader2 } from 'lucide-react'
|
||||
import Link from 'next/link'
|
||||
@@ -20,6 +20,7 @@ import { validateCallbackUrl } from '@/lib/core/security/input-validation'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { quickValidateEmail } from '@/lib/messaging/email/validation'
|
||||
import { captureClientEvent } from '@/lib/posthog/client'
|
||||
import { AUTH_SUBMIT_BTN } from '@/app/(auth)/components/auth-button-classes'
|
||||
import { SocialLoginButtons } from '@/app/(auth)/components/social-login-buttons'
|
||||
import { SSOLoginButton } from '@/app/(auth)/components/sso-login-button'
|
||||
@@ -113,6 +114,10 @@ export default function LoginPage({
|
||||
: null
|
||||
)
|
||||
|
||||
useEffect(() => {
|
||||
captureClientEvent('login_page_viewed', {})
|
||||
}, [])
|
||||
|
||||
const handleEmailChange = (e: React.ChangeEvent<HTMLInputElement>) => {
|
||||
const newEmail = e.target.value
|
||||
setEmail(newEmail)
|
||||
|
||||
@@ -12,7 +12,7 @@ import { client, useSession } from '@/lib/auth/auth-client'
|
||||
import { getEnv, isFalsy, isTruthy } from '@/lib/core/config/env'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { quickValidateEmail } from '@/lib/messaging/email/validation'
|
||||
import { captureEvent } from '@/lib/posthog/client'
|
||||
import { captureClientEvent, captureEvent } from '@/lib/posthog/client'
|
||||
import { AUTH_SUBMIT_BTN } from '@/app/(auth)/components/auth-button-classes'
|
||||
import { SocialLoginButtons } from '@/app/(auth)/components/social-login-buttons'
|
||||
import { SSOLoginButton } from '@/app/(auth)/components/sso-login-button'
|
||||
@@ -71,15 +71,13 @@ const validateEmailField = (emailValue: string): string[] => {
|
||||
return errors
|
||||
}
|
||||
|
||||
function SignupFormContent({
|
||||
githubAvailable,
|
||||
googleAvailable,
|
||||
isProduction,
|
||||
}: {
|
||||
interface SignupFormProps {
|
||||
githubAvailable: boolean
|
||||
googleAvailable: boolean
|
||||
isProduction: boolean
|
||||
}) {
|
||||
}
|
||||
|
||||
function SignupFormContent({ githubAvailable, googleAvailable, isProduction }: SignupFormProps) {
|
||||
const router = useRouter()
|
||||
const searchParams = useSearchParams()
|
||||
const { refetch: refetchSession } = useSession()
|
||||
@@ -87,8 +85,8 @@ function SignupFormContent({
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
|
||||
useEffect(() => {
|
||||
captureEvent(posthog, 'signup_page_viewed', {})
|
||||
}, [posthog])
|
||||
captureClientEvent('signup_page_viewed', {})
|
||||
}, [])
|
||||
const [showPassword, setShowPassword] = useState(false)
|
||||
const [password, setPassword] = useState('')
|
||||
const [passwordErrors, setPasswordErrors] = useState<string[]>([])
|
||||
@@ -243,8 +241,6 @@ function SignupFormContent({
|
||||
return
|
||||
}
|
||||
|
||||
const sanitizedName = trimmedName
|
||||
|
||||
let token: string | undefined
|
||||
const widget = turnstileRef.current
|
||||
if (turnstileSiteKey && widget) {
|
||||
@@ -267,7 +263,7 @@ function SignupFormContent({
|
||||
{
|
||||
email: emailValue,
|
||||
password: passwordValue,
|
||||
name: sanitizedName,
|
||||
name: trimmedName,
|
||||
},
|
||||
{
|
||||
headers: {
|
||||
@@ -629,11 +625,7 @@ export default function SignupPage({
|
||||
githubAvailable,
|
||||
googleAvailable,
|
||||
isProduction,
|
||||
}: {
|
||||
githubAvailable: boolean
|
||||
googleAvailable: boolean
|
||||
isProduction: boolean
|
||||
}) {
|
||||
}: SignupFormProps) {
|
||||
return (
|
||||
<Suspense
|
||||
fallback={<div className='flex h-screen items-center justify-center'>Loading...</div>}
|
||||
|
||||
@@ -9,6 +9,8 @@ import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { BackLink } from '@/app/(landing)/blog/[slug]/back-link'
|
||||
import { ShareButton } from '@/app/(landing)/blog/[slug]/share-button'
|
||||
|
||||
export const dynamicParams = false
|
||||
|
||||
export async function generateStaticParams() {
|
||||
const posts = await getAllPostMeta()
|
||||
return posts.map((p) => ({ slug: p.slug }))
|
||||
@@ -161,7 +163,7 @@ export default async function Page({ params }: { params: Promise<{ slug: string
|
||||
<h3 className='font-[430] font-season text-lg text-white leading-tight tracking-[-0.01em]'>
|
||||
{p.title}
|
||||
</h3>
|
||||
<p className='line-clamp-2 text-[#F6F6F0]/50 text-sm leading-[150%]'>
|
||||
<p className='line-clamp-2 text-[var(--landing-text-muted)] text-sm leading-[150%]'>
|
||||
{p.description}
|
||||
</p>
|
||||
</div>
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user