mirror of
https://github.com/simstudioai/sim.git
synced 2026-04-28 03:00:29 -04:00
Compare commits
34 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
387cc977fa | ||
|
|
0464a57601 | ||
|
|
23ccd4a50c | ||
|
|
ba6bc91681 | ||
|
|
c0bc62c592 | ||
|
|
377712c9f3 | ||
|
|
6dddc3f796 | ||
|
|
010435c53b | ||
|
|
cd8c5bd0b8 | ||
|
|
f0285adc38 | ||
|
|
a39dc158cf | ||
|
|
05c1c5b1f6 | ||
|
|
5274efd8f9 | ||
|
|
0b36c8bcb6 | ||
|
|
842aa2c254 | ||
|
|
46ffc4904e | ||
|
|
ff71a07e8f | ||
|
|
22d4639f13 | ||
|
|
80095788fc | ||
|
|
61b33e5978 | ||
|
|
29fbad2874 | ||
|
|
e281ca0dac | ||
|
|
cbf0a139ed | ||
|
|
751eeaccd4 | ||
|
|
1bf2d95813 | ||
|
|
3a1b1a8032 | ||
|
|
3d6660ba4d | ||
|
|
48e174b21f | ||
|
|
7529a75ac0 | ||
|
|
6b2e83bf58 | ||
|
|
fc07922536 | ||
|
|
367415f649 | ||
|
|
ff2e369c20 | ||
|
|
64cdab24f7 |
@@ -17,9 +17,10 @@ import { ResponseSection } from '@/components/ui/response-section'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { getApiSpecContent, openapi } from '@/lib/openapi'
|
||||
import { type PageData, source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
const SUPPORTED_LANGUAGES: Set<string> = new Set(i18n.languages)
|
||||
const BASE_URL = 'https://docs.sim.ai'
|
||||
const BASE_URL = DOCS_BASE_URL
|
||||
|
||||
const OG_LOCALE_MAP: Record<string, string> = {
|
||||
en: 'en_US',
|
||||
|
||||
@@ -3,7 +3,6 @@ import { defineI18nUI } from 'fumadocs-ui/i18n'
|
||||
import { DocsLayout } from 'fumadocs-ui/layouts/docs'
|
||||
import { RootProvider } from 'fumadocs-ui/provider/next'
|
||||
import { Geist_Mono, Inter } from 'next/font/google'
|
||||
import Script from 'next/script'
|
||||
import {
|
||||
SidebarFolder,
|
||||
SidebarItem,
|
||||
@@ -13,6 +12,7 @@ import { Navbar } from '@/components/navbar/navbar'
|
||||
import { SimLogoFull } from '@/components/ui/sim-logo'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
import '../global.css'
|
||||
|
||||
const inter = Inter({
|
||||
@@ -67,14 +67,14 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
name: 'Sim Documentation',
|
||||
description:
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
url: 'https://docs.sim.ai',
|
||||
url: DOCS_BASE_URL,
|
||||
publisher: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
url: 'https://docs.sim.ai/static/logo.png',
|
||||
url: `${DOCS_BASE_URL}/static/logo.png`,
|
||||
},
|
||||
},
|
||||
inLanguage: lang,
|
||||
@@ -82,7 +82,7 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
'@type': 'SearchAction',
|
||||
target: {
|
||||
'@type': 'EntryPoint',
|
||||
urlTemplate: 'https://docs.sim.ai/api/search?q={search_term_string}',
|
||||
urlTemplate: `${DOCS_BASE_URL}/api/search?q={search_term_string}`,
|
||||
},
|
||||
'query-input': 'required name=search_term_string',
|
||||
},
|
||||
@@ -101,7 +101,6 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
/>
|
||||
</head>
|
||||
<body className='flex min-h-screen flex-col font-sans'>
|
||||
<Script src='https://assets.onedollarstats.com/stonks.js' strategy='lazyOnload' />
|
||||
<RootProvider i18n={provider(lang)}>
|
||||
<Navbar />
|
||||
<DocsLayout
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { ReactNode } from 'react'
|
||||
import type { Viewport } from 'next'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export default function RootLayout({ children }: { children: ReactNode }) {
|
||||
return children
|
||||
@@ -12,7 +13,7 @@ export const viewport: Viewport = {
|
||||
}
|
||||
|
||||
export const metadata = {
|
||||
metadataBase: new URL('https://docs.sim.ai'),
|
||||
metadataBase: new URL(DOCS_BASE_URL),
|
||||
title: {
|
||||
default: 'Sim Documentation — The AI Workspace for Teams',
|
||||
template: '%s | Sim Docs',
|
||||
@@ -61,14 +62,14 @@ export const metadata = {
|
||||
type: 'website',
|
||||
locale: 'en_US',
|
||||
alternateLocale: ['es_ES', 'fr_FR', 'de_DE', 'ja_JP', 'zh_CN'],
|
||||
url: 'https://docs.sim.ai',
|
||||
url: DOCS_BASE_URL,
|
||||
siteName: 'Sim Documentation',
|
||||
title: 'Sim Documentation — The AI Workspace for Teams',
|
||||
description:
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
images: [
|
||||
{
|
||||
url: 'https://docs.sim.ai/api/og?title=Sim%20Documentation',
|
||||
url: `${DOCS_BASE_URL}/api/og?title=Sim%20Documentation`,
|
||||
width: 1200,
|
||||
height: 630,
|
||||
alt: 'Sim Documentation',
|
||||
@@ -82,7 +83,7 @@ export const metadata = {
|
||||
'Documentation for Sim — the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM.',
|
||||
creator: '@simdotai',
|
||||
site: '@simdotai',
|
||||
images: ['https://docs.sim.ai/api/og?title=Sim%20Documentation'],
|
||||
images: [`${DOCS_BASE_URL}/api/og?title=Sim%20Documentation`],
|
||||
},
|
||||
robots: {
|
||||
index: true,
|
||||
@@ -96,15 +97,15 @@ export const metadata = {
|
||||
},
|
||||
},
|
||||
alternates: {
|
||||
canonical: 'https://docs.sim.ai',
|
||||
canonical: DOCS_BASE_URL,
|
||||
languages: {
|
||||
'x-default': 'https://docs.sim.ai',
|
||||
en: 'https://docs.sim.ai',
|
||||
es: 'https://docs.sim.ai/es',
|
||||
fr: 'https://docs.sim.ai/fr',
|
||||
de: 'https://docs.sim.ai/de',
|
||||
ja: 'https://docs.sim.ai/ja',
|
||||
zh: 'https://docs.sim.ai/zh',
|
||||
'x-default': DOCS_BASE_URL,
|
||||
en: DOCS_BASE_URL,
|
||||
es: `${DOCS_BASE_URL}/es`,
|
||||
fr: `${DOCS_BASE_URL}/fr`,
|
||||
de: `${DOCS_BASE_URL}/de`,
|
||||
ja: `${DOCS_BASE_URL}/ja`,
|
||||
zh: `${DOCS_BASE_URL}/zh`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = false
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
try {
|
||||
const pages = source.getPages().filter((page) => {
|
||||
|
||||
@@ -1,70 +1,18 @@
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = false
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
const robotsTxt = `# Robots.txt for Sim Documentation
|
||||
|
||||
User-agent: *
|
||||
Allow: /
|
||||
|
||||
# Search engine crawlers
|
||||
User-agent: Googlebot
|
||||
Allow: /
|
||||
|
||||
User-agent: Bingbot
|
||||
Allow: /
|
||||
|
||||
User-agent: Slurp
|
||||
Allow: /
|
||||
|
||||
User-agent: DuckDuckBot
|
||||
Allow: /
|
||||
|
||||
User-agent: Baiduspider
|
||||
Allow: /
|
||||
|
||||
User-agent: YandexBot
|
||||
Allow: /
|
||||
|
||||
# AI and LLM crawlers - explicitly allowed for documentation indexing
|
||||
User-agent: GPTBot
|
||||
Allow: /
|
||||
|
||||
User-agent: ChatGPT-User
|
||||
Allow: /
|
||||
|
||||
User-agent: CCBot
|
||||
Allow: /
|
||||
|
||||
User-agent: anthropic-ai
|
||||
Allow: /
|
||||
|
||||
User-agent: Claude-Web
|
||||
Allow: /
|
||||
|
||||
User-agent: Applebot
|
||||
Allow: /
|
||||
|
||||
User-agent: PerplexityBot
|
||||
Allow: /
|
||||
|
||||
User-agent: Diffbot
|
||||
Allow: /
|
||||
|
||||
User-agent: FacebookBot
|
||||
Allow: /
|
||||
|
||||
User-agent: cohere-ai
|
||||
Allow: /
|
||||
|
||||
# Disallow admin and internal paths (if any exist)
|
||||
Disallow: /.next/
|
||||
Disallow: /api/internal/
|
||||
Disallow: /_next/static/
|
||||
Disallow: /admin/
|
||||
|
||||
# Allow but don't prioritize these
|
||||
Allow: /
|
||||
Allow: /api/search
|
||||
Allow: /llms.txt
|
||||
Allow: /llms-full.txt
|
||||
@@ -73,23 +21,12 @@ Allow: /llms.mdx/
|
||||
# Sitemaps
|
||||
Sitemap: ${baseUrl}/sitemap.xml
|
||||
|
||||
# Crawl delay for aggressive bots (optional)
|
||||
# Crawl-delay: 1
|
||||
|
||||
# Additional resources for AI indexing
|
||||
# See https://github.com/AnswerDotAI/llms-txt for more info
|
||||
# LLM-friendly content:
|
||||
# Manifest: ${baseUrl}/llms.txt
|
||||
# Full content: ${baseUrl}/llms-full.txt
|
||||
# Individual pages: ${baseUrl}/llms.mdx/[page-path]
|
||||
|
||||
# Multi-language documentation available at:
|
||||
# ${baseUrl}/en - English
|
||||
# ${baseUrl}/es - Español
|
||||
# ${baseUrl}/fr - Français
|
||||
# ${baseUrl}/de - Deutsch
|
||||
# ${baseUrl}/ja - 日本語
|
||||
# ${baseUrl}/zh - 简体中文`
|
||||
# Individual pages: ${baseUrl}/llms.mdx/[page-path]`
|
||||
|
||||
return new Response(robotsTxt, {
|
||||
headers: {
|
||||
|
||||
42
apps/docs/app/sitemap.ts
Normal file
42
apps/docs/app/sitemap.ts
Normal file
@@ -0,0 +1,42 @@
|
||||
import type { MetadataRoute } from 'next'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export default function sitemap(): MetadataRoute.Sitemap {
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
const languages = source.getLanguages()
|
||||
|
||||
const pagesBySlug = new Map<string, Map<string, string>>()
|
||||
for (const { language, pages } of languages) {
|
||||
for (const page of pages) {
|
||||
const key = page.slugs.join('/')
|
||||
if (!pagesBySlug.has(key)) {
|
||||
pagesBySlug.set(key, new Map())
|
||||
}
|
||||
pagesBySlug.get(key)!.set(language, `${baseUrl}${page.url}`)
|
||||
}
|
||||
}
|
||||
|
||||
const entries: MetadataRoute.Sitemap = []
|
||||
for (const [, localeMap] of pagesBySlug) {
|
||||
const defaultUrl = localeMap.get(i18n.defaultLanguage)
|
||||
if (!defaultUrl) continue
|
||||
|
||||
const langAlternates: Record<string, string> = {}
|
||||
for (const [lang, url] of localeMap) {
|
||||
langAlternates[lang] = url
|
||||
}
|
||||
|
||||
langAlternates['x-default'] = defaultUrl
|
||||
|
||||
entries.push({
|
||||
url: defaultUrl,
|
||||
alternates: { languages: langAlternates },
|
||||
})
|
||||
}
|
||||
|
||||
return entries
|
||||
}
|
||||
@@ -1,62 +0,0 @@
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { source } from '@/lib/source'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
|
||||
const allPages = source.getPages()
|
||||
|
||||
const getPriority = (url: string): string => {
|
||||
if (url === '/introduction' || url === '/') return '1.0'
|
||||
if (url === '/getting-started') return '0.9'
|
||||
if (url.match(/^\/[^/]+$/)) return '0.8'
|
||||
if (url.includes('/sdks/') || url.includes('/tools/')) return '0.7'
|
||||
return '0.6'
|
||||
}
|
||||
|
||||
const urls = allPages
|
||||
.flatMap((page) => {
|
||||
const urlWithoutLang = page.url.replace(/^\/[a-z]{2}\//, '/')
|
||||
|
||||
return i18n.languages.map((lang) => {
|
||||
const url =
|
||||
lang === i18n.defaultLanguage
|
||||
? `${baseUrl}${urlWithoutLang}`
|
||||
: `${baseUrl}/${lang}${urlWithoutLang}`
|
||||
|
||||
return ` <url>
|
||||
<loc>${url}</loc>
|
||||
<priority>${getPriority(urlWithoutLang)}</priority>
|
||||
${i18n.languages.length > 1 ? generateAlternateLinks(baseUrl, urlWithoutLang) : ''}
|
||||
</url>`
|
||||
})
|
||||
})
|
||||
.join('\n')
|
||||
|
||||
const sitemap = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:xhtml="http://www.w3.org/1999/xhtml">
|
||||
${urls}
|
||||
</urlset>`
|
||||
|
||||
return new Response(sitemap, {
|
||||
headers: {
|
||||
'Content-Type': 'application/xml',
|
||||
'Cache-Control': 'public, max-age=3600, s-maxage=3600',
|
||||
},
|
||||
})
|
||||
}
|
||||
|
||||
function generateAlternateLinks(baseUrl: string, urlWithoutLang: string): string {
|
||||
const langLinks = i18n.languages
|
||||
.map((lang) => {
|
||||
const url =
|
||||
lang === i18n.defaultLanguage
|
||||
? `${baseUrl}${urlWithoutLang}`
|
||||
: `${baseUrl}/${lang}${urlWithoutLang}`
|
||||
return ` <xhtml:link rel="alternate" hreflang="${lang}" href="${url}" />`
|
||||
})
|
||||
.join('\n')
|
||||
return `${langLinks}\n <xhtml:link rel="alternate" hreflang="x-default" href="${baseUrl}${urlWithoutLang}" />`
|
||||
}
|
||||
@@ -2087,6 +2087,21 @@ export function BrandfetchIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function BrightDataIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='54 93 22 52' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M62 95.21c.19 2.16 1.85 3.24 2.82 4.74.25.38.48.11.67-.16.21-.31.6-1.21 1.15-1.28-.35 1.38-.04 3.15.16 4.45.49 3.05-1.22 5.64-4.07 6.18-3.38.65-6.22-2.21-5.6-5.62.23-1.24 1.37-2.5.77-3.7-.85-1.7.54-.52.79-.22 1.04 1.2 1.21.09 1.45-.55.24-.63.31-1.31.47-1.97.19-.77.55-1.4 1.39-1.87z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
<path
|
||||
d='M66.70 123.37c0 3.69.04 7.38-.03 11.07-.02 1.04.31 1.48 1.32 1.49.29 0 .59.12.88.13.93.01 1.18.47 1.16 1.37-.05 2.19 0 2.19-2.24 2.19-3.48 0-6.96-.04-10.44.03-1.09.02-1.47-.33-1.3-1.36.02-.12.02-.26 0-.38-.28-1.39.39-1.96 1.7-1.9 1.36.06 1.76-.51 1.74-1.88-.09-5.17-.08-10.35 0-15.53.02-1.22-.32-1.87-1.52-2.17-.57-.14-1.47-.11-1.57-.85-.15-1.04-.05-2.11.01-3.17.02-.34.44-.35.73-.39 2.81-.39 5.63-.77 8.44-1.18.92-.14 1.15.2 1.14 1.09-.04 3.8-.02 7.62-.02 11.44z'
|
||||
fill='currentColor'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function BrowserUseIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import { DOCS_BASE_URL } from '@/lib/urls'
|
||||
|
||||
interface StructuredDataProps {
|
||||
title: string
|
||||
description: string
|
||||
@@ -15,7 +17,7 @@ export function StructuredData({
|
||||
dateModified,
|
||||
breadcrumb,
|
||||
}: StructuredDataProps) {
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const baseUrl = DOCS_BASE_URL
|
||||
|
||||
const articleStructuredData = {
|
||||
'@context': 'https://schema.org',
|
||||
|
||||
@@ -23,6 +23,7 @@ import {
|
||||
BoxCompanyIcon,
|
||||
BrainIcon,
|
||||
BrandfetchIcon,
|
||||
BrightDataIcon,
|
||||
BrowserUseIcon,
|
||||
CalComIcon,
|
||||
CalendlyIcon,
|
||||
@@ -215,6 +216,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
attio: AttioIcon,
|
||||
box: BoxCompanyIcon,
|
||||
brandfetch: BrandfetchIcon,
|
||||
brightdata: BrightDataIcon,
|
||||
browser_use: BrowserUseIcon,
|
||||
calcom: CalComIcon,
|
||||
calendly: CalendlyIcon,
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Video } from '@/components/ui/video'
|
||||
|
||||
Sim provides a comprehensive external API for querying workflow execution logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
Sim provides a comprehensive external API for querying workflow run logs and setting up webhooks for real-time notifications when workflows complete.
|
||||
|
||||
## Authentication
|
||||
|
||||
@@ -21,7 +21,7 @@ You can generate API keys from the Sim platform and navigate to **Settings**, th
|
||||
|
||||
## Logs API
|
||||
|
||||
All API responses include information about your workflow execution limits and usage:
|
||||
All API responses include information about your workflow run limits and usage:
|
||||
|
||||
```json
|
||||
"limits": {
|
||||
@@ -48,11 +48,11 @@ All API responses include information about your workflow execution limits and u
|
||||
}
|
||||
```
|
||||
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow executions. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
**Note:** Rate limits use a token bucket algorithm. `remaining` can exceed `requestsPerMinute` up to `maxBurst` when you haven't used your full allowance recently, allowing for burst traffic. The rate limits in the response body are for workflow runs. The rate limits for calling this API endpoint are in the response headers (`X-RateLimit-*`).
|
||||
|
||||
### Query Logs
|
||||
|
||||
Query workflow execution logs with extensive filtering options.
|
||||
Query workflow run logs with extensive filtering options.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -70,11 +70,11 @@ Query workflow execution logs with extensive filtering options.
|
||||
- `level` - Filter by level: `info`, `error`
|
||||
- `startDate` - ISO timestamp for date range start
|
||||
- `endDate` - ISO timestamp for date range end
|
||||
- `executionId` - Exact execution ID match
|
||||
- `minDurationMs` - Minimum execution duration in milliseconds
|
||||
- `maxDurationMs` - Maximum execution duration in milliseconds
|
||||
- `minCost` - Minimum execution cost
|
||||
- `maxCost` - Maximum execution cost
|
||||
- `executionId` - Exact run ID match
|
||||
- `minDurationMs` - Minimum run duration in milliseconds
|
||||
- `maxDurationMs` - Maximum run duration in milliseconds
|
||||
- `minCost` - Minimum run cost
|
||||
- `maxCost` - Maximum run cost
|
||||
- `model` - Filter by AI model used
|
||||
|
||||
**Pagination:**
|
||||
@@ -213,9 +213,9 @@ Retrieve detailed information about a specific log entry.
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Get Execution Details
|
||||
### Get Run Details
|
||||
|
||||
Retrieve execution details including the workflow state snapshot.
|
||||
Retrieve run details including the workflow state snapshot.
|
||||
|
||||
<Tabs items={['Request', 'Response']}>
|
||||
<Tab value="Request">
|
||||
@@ -248,7 +248,7 @@ Retrieve execution details including the workflow state snapshot.
|
||||
|
||||
## Notifications
|
||||
|
||||
Get real-time notifications when workflow executions complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
Get real-time notifications when workflow runs complete via webhook, email, or Slack. Notifications are configured at the workspace level from the Logs page.
|
||||
|
||||
### Configuration
|
||||
|
||||
@@ -256,7 +256,7 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Notification Channels:**
|
||||
- **Webhook**: Send HTTP POST requests to your endpoint
|
||||
- **Email**: Receive email notifications with execution details
|
||||
- **Email**: Receive email notifications with run details
|
||||
- **Slack**: Post messages to a Slack channel
|
||||
|
||||
**Workflow Selection:**
|
||||
@@ -269,38 +269,38 @@ Configure notifications from the Logs page by clicking the menu button and selec
|
||||
|
||||
**Optional Data:**
|
||||
- `includeFinalOutput`: Include the workflow's final output
|
||||
- `includeTraceSpans`: Include detailed execution trace spans
|
||||
- `includeTraceSpans`: Include detailed trace spans
|
||||
- `includeRateLimits`: Include rate limit information (sync/async limits and remaining)
|
||||
- `includeUsageData`: Include billing period usage and limits
|
||||
|
||||
### Alert Rules
|
||||
|
||||
Instead of receiving notifications for every execution, configure alert rules to be notified only when issues are detected:
|
||||
Instead of receiving notifications for every run, configure alert rules to be notified only when issues are detected:
|
||||
|
||||
**Consecutive Failures**
|
||||
- Alert after X consecutive failed executions (e.g., 3 failures in a row)
|
||||
- Resets when an execution succeeds
|
||||
- Alert after X consecutive failed runs (e.g., 3 failures in a row)
|
||||
- Resets when a run succeeds
|
||||
|
||||
**Failure Rate**
|
||||
- Alert when failure rate exceeds X% over the last Y hours
|
||||
- Requires minimum 5 executions in the window
|
||||
- Requires minimum 5 runs in the window
|
||||
- Only triggers after the full time window has elapsed
|
||||
|
||||
**Latency Threshold**
|
||||
- Alert when any execution takes longer than X seconds
|
||||
- Alert when any run takes longer than X seconds
|
||||
- Useful for catching slow or hanging workflows
|
||||
|
||||
**Latency Spike**
|
||||
- Alert when execution is X% slower than the average
|
||||
- Alert when a run is X% slower than the average
|
||||
- Compares against the average duration over the configured time window
|
||||
- Requires minimum 5 executions to establish baseline
|
||||
- Requires minimum 5 runs to establish baseline
|
||||
|
||||
**Cost Threshold**
|
||||
- Alert when a single execution costs more than $X
|
||||
- Alert when a single run costs more than $X
|
||||
- Useful for catching expensive LLM calls
|
||||
|
||||
**No Activity**
|
||||
- Alert when no executions occur within X hours
|
||||
- Alert when no runs occur within X hours
|
||||
- Useful for monitoring scheduled workflows that should run regularly
|
||||
|
||||
**Error Count**
|
||||
@@ -317,7 +317,7 @@ For webhooks, additional options are available:
|
||||
|
||||
### Payload Structure
|
||||
|
||||
When a workflow execution completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
When a workflow run completes, Sim sends the following payload (via webhook POST, email, or Slack):
|
||||
|
||||
```json
|
||||
{
|
||||
@@ -456,7 +456,7 @@ Failed webhook deliveries are retried with exponential backoff and jitter:
|
||||
- Deliveries timeout after 30 seconds
|
||||
|
||||
<Callout type="info">
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow execution performance.
|
||||
Webhook deliveries are processed asynchronously and don't affect workflow run performance.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -596,11 +596,11 @@ app.listen(3000, () => {
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How do I trigger async execution via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "How do I trigger an async run via the API?", answer: "Set the X-Execution-Mode header to 'async' on your POST request to /api/workflows/{id}/execute. The API returns a 202 response with a jobId, executionId, and a statusUrl you can poll to check when the job completes. Async mode does not support draft state, workflow overrides, or selective output options." },
|
||||
{ question: "What authentication methods does the API support?", answer: "The API supports two authentication methods: API keys passed in the x-api-key header, and session-based authentication for logged-in users. API keys can be generated from Settings > Sim Keys in the platform. Workflows with public API access enabled can also be called without authentication." },
|
||||
{ question: "How does the webhook retry policy work?", answer: "Failed webhook deliveries are retried up to 5 times with exponential backoff: 5 seconds, 15 seconds, 1 minute, 3 minutes, and 10 minutes, plus up to 10% jitter. Only HTTP 5xx and 429 responses trigger retries. Each delivery times out after 30 seconds." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow execution rate limits, which are shown in the response body." },
|
||||
{ question: "What rate limits apply to the Logs API?", answer: "Rate limits use a token bucket algorithm. Free plans get 30 requests/minute with 60 burst capacity, Pro gets 100/200, Team gets 200/400, and Enterprise gets 500/1000. These are separate from workflow run rate limits, which are shown in the response body." },
|
||||
{ question: "How do I verify that a webhook is from Sim?", answer: "Configure a webhook secret when setting up notifications. Sim signs each delivery with HMAC-SHA256 using the format 't={timestamp},v1={signature}' in the sim-signature header. Compute the HMAC of '{timestamp}.{body}' with your secret and compare it to the signature value." },
|
||||
{ question: "What alert rules are available for notifications?", answer: "You can configure alerts for consecutive failures, failure rate thresholds, latency thresholds, latency spikes (percentage above average), cost thresholds, no-activity periods, and error counts within a time window. All alert types include a 1-hour cooldown to prevent notification spam." },
|
||||
{ question: "Can I filter which executions trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
{ question: "Can I filter which runs trigger notifications?", answer: "Yes. You can filter notifications by specific workflows (or select all), log level (info or error), and trigger type (api, webhook, schedule, manual, chat). You can also choose whether to include final output, trace spans, rate limits, and usage data in the notification payload." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Understanding how workflows execute in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
Understanding how workflows run in Sim is key to building efficient and reliable automations. The execution engine automatically handles dependencies, concurrency, and data flow to ensure your workflows run smoothly and predictably.
|
||||
|
||||
## How Workflows Execute
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim's execution engine processes workflows intelligently by analyzing dependenci
|
||||
|
||||
### Concurrent Execution by Default
|
||||
|
||||
Multiple blocks run concurrently when they don't depend on each other. This parallel execution dramatically improves performance without requiring manual configuration.
|
||||
Multiple blocks run concurrently when they don't depend on each other. This dramatically improves performance without requiring manual configuration.
|
||||
|
||||
<Image
|
||||
src="/static/execution/concurrency.png"
|
||||
@@ -49,7 +49,7 @@ Workflows can branch in multiple directions using routing blocks. The execution
|
||||
height={500}
|
||||
/>
|
||||
|
||||
This workflow demonstrates how execution can follow different paths based on conditions or AI decisions, with each path executing independently.
|
||||
This workflow demonstrates how a run can follow different paths based on conditions or AI decisions, with each path running independently.
|
||||
|
||||
## Block Types
|
||||
|
||||
@@ -57,7 +57,7 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
|
||||
<Cards>
|
||||
<Card title="Triggers" href="/triggers">
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin execution.
|
||||
**Starter blocks** initiate workflows and **Webhook blocks** respond to external events. Every workflow needs a trigger to begin a run.
|
||||
</Card>
|
||||
|
||||
<Card title="Processing Blocks" href="/blocks">
|
||||
@@ -73,37 +73,37 @@ Sim provides different types of blocks that serve specific purposes in your work
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
All blocks execute automatically based on their dependencies - you don't need to manually manage execution order or timing.
|
||||
All blocks run automatically based on their dependencies - you don't need to manually manage run order or timing.
|
||||
|
||||
## Execution Monitoring
|
||||
## Run Monitoring
|
||||
|
||||
When workflows run, Sim provides real-time visibility into the execution process:
|
||||
When workflows run, Sim provides real-time visibility into the process:
|
||||
|
||||
- **Live Block States**: See which blocks are currently executing, completed, or failed
|
||||
- **Execution Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track execution time and costs for each block
|
||||
- **Path Visualization**: Understand which execution paths were taken through your workflow
|
||||
- **Live Block States**: See which blocks are currently running, completed, or failed
|
||||
- **Run Logs**: Detailed logs appear in real-time showing inputs, outputs, and any errors
|
||||
- **Performance Metrics**: Track run time and costs for each block
|
||||
- **Path Visualization**: Understand which paths were taken through your workflow
|
||||
|
||||
<Callout type="info">
|
||||
All execution details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
All run details are captured and available for review even after workflows complete, helping with debugging and optimization.
|
||||
</Callout>
|
||||
|
||||
## Key Execution Principles
|
||||
## Key Principles
|
||||
|
||||
Understanding these core principles will help you build better workflows:
|
||||
|
||||
1. **Dependency-Based Execution**: Blocks only run when all their dependencies have completed
|
||||
2. **Automatic Parallelization**: Independent blocks run concurrently without configuration
|
||||
3. **Smart Data Flow**: Outputs flow automatically to connected blocks
|
||||
4. **Error Handling**: Failed blocks stop their execution path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block executes, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to execute wins
|
||||
6. **State Persistence**: All block outputs and execution details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, execution is stopped to prevent infinite loops
|
||||
4. **Error Handling**: Failed blocks stop their run path but don't affect independent paths
|
||||
5. **Response Blocks as Exit Points**: When a Response block runs, the entire workflow stops and the API response is sent immediately. Multiple Response blocks can exist on different branches — the first one to run wins
|
||||
6. **State Persistence**: All block outputs and run details are preserved for debugging
|
||||
7. **Cycle Protection**: Workflows that call other workflows (via Workflow blocks, MCP tools, or API blocks) are tracked with a call chain. If the chain exceeds 25 hops, the run is stopped to prevent infinite loops
|
||||
|
||||
## Next Steps
|
||||
|
||||
Now that you understand execution basics, explore:
|
||||
- **[Block Types](/blocks)** - Learn about specific block capabilities
|
||||
- **[Logging](/execution/logging)** - Monitor workflow executions and debug issues
|
||||
- **[Logging](/execution/logging)** - Monitor workflow runs and debug issues
|
||||
- **[Cost Calculation](/execution/costs)** - Understand and optimize workflow costs
|
||||
- **[Triggers](/triggers)** - Set up different ways to run your workflows
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim automatically calculates costs for all workflow executions, providing transparent pricing based on AI model usage and execution charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
Sim automatically calculates costs for all workflow runs, providing transparent pricing based on AI model usage and run charges. Understanding these costs helps you optimize workflows and manage your budget effectively.
|
||||
|
||||
## Credits
|
||||
|
||||
@@ -16,18 +16,18 @@ All plan limits, usage meters, and billing thresholds are displayed in credits t
|
||||
|
||||
## How Costs Are Calculated
|
||||
|
||||
Every workflow execution includes two cost components:
|
||||
Every workflow run includes two cost components:
|
||||
|
||||
**Base Execution Charge**: 1 credit ($0.005) per execution
|
||||
**Base Run Charge**: 1 credit ($0.005) per run
|
||||
|
||||
**AI Model Usage**: Variable cost based on token consumption
|
||||
```javascript
|
||||
modelCost = (inputTokens * inputPrice + outputTokens * outputPrice) / 1_000_000
|
||||
totalCredits = baseExecutionCharge + modelCost × 200
|
||||
totalCredits = baseRunCharge + modelCost × 200
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base execution charge.
|
||||
AI model prices are per million tokens. The calculation divides by 1,000,000 to get the actual cost. Workflows without AI blocks only incur the base run charge.
|
||||
</Callout>
|
||||
|
||||
## Model Breakdown in Logs
|
||||
@@ -48,7 +48,7 @@ The model breakdown shows:
|
||||
- **Token Usage**: Input and output token counts for each model
|
||||
- **Cost Breakdown**: Individual costs per model and operation
|
||||
- **Model Distribution**: Which models were used and how many times
|
||||
- **Total Cost**: Aggregate cost for the entire workflow execution
|
||||
- **Total Cost**: Aggregate cost for the entire workflow run
|
||||
|
||||
## Pricing Options
|
||||
|
||||
@@ -330,18 +330,18 @@ Max (individual) shares the same rate limits as team plans. Team plans (Pro or M
|
||||
|
||||
Team plans (Pro or Max for Teams) use 500 GB.
|
||||
|
||||
### Execution Time Limits
|
||||
### Run Time Limits
|
||||
|
||||
| Plan | Sync | Async |
|
||||
|------|------|-------|
|
||||
| **Free** | 5 minutes | 90 minutes |
|
||||
| **Pro / Max / Team / Enterprise** | 50 minutes | 90 minutes |
|
||||
|
||||
**Sync executions** run immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async executions** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
**Sync runs** complete immediately and return results directly. These are triggered via the API with `async: false` (default) or through the UI.
|
||||
**Async runs** (triggered via API with `async: true`, webhooks, or schedules) run in the background.
|
||||
|
||||
<Callout type="info">
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async execution or break them into smaller workflows.
|
||||
If a workflow exceeds its time limit, it will be terminated and marked as failed with a timeout error. Design long-running workflows to use async runs or break them into smaller workflows.
|
||||
</Callout>
|
||||
|
||||
## Billing Model
|
||||
@@ -452,18 +452,18 @@ curl -X GET -H "X-API-Key: YOUR_API_KEY" -H "Content-Type: application/json" htt
|
||||
## Next Steps
|
||||
|
||||
- Review your current usage in [Settings → Subscription](https://sim.ai/settings/subscription)
|
||||
- Learn about [Logging](/execution/logging) to track execution details
|
||||
- Learn about [Logging](/execution/logging) to track run details
|
||||
- Explore the [External API](/execution/api) for programmatic cost monitoring
|
||||
- Check out [workflow optimization techniques](/blocks) to reduce costs
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How much does a single workflow execution cost?", answer: "Every execution incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base execution charge." },
|
||||
{ question: "How much does a single workflow run cost?", answer: "Every run incurs a base charge of 1 credit ($0.005). On top of that, any AI model usage is billed based on token consumption. Workflows that do not use AI blocks only pay the base run charge." },
|
||||
{ question: "What is the credit-to-dollar conversion rate?", answer: "1 credit equals $0.005. All plan limits, usage meters, and billing thresholds in the Sim UI are displayed in credits." },
|
||||
{ question: "Do unused daily refresh credits carry over?", answer: "No. Daily refresh credits reset every 24 hours and do not accumulate. If you do not use them within the day, they are lost." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and executions will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "What happens when I exceed my plan's credit limit?", answer: "By default, your usage is capped at your plan's included credits and runs will stop. If you enable on-demand billing or manually raise your usage limit in Settings, you can continue running workflows and pay for the overage at the end of the billing period." },
|
||||
{ question: "How does the 1.1x hosted model multiplier work?", answer: "When you use Sim's hosted API keys (instead of bringing your own), a 1.1x multiplier is applied to the base model pricing for Agent blocks. This covers infrastructure and API management costs. You can avoid this multiplier by using your own API keys via the BYOK feature." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or vLLM, there are no API costs for those model calls. You still pay the base execution charge of 1 credit per execution." },
|
||||
{ question: "Are there any free options for AI models?", answer: "Yes. If you run local models through Ollama or vLLM, there are no API costs for those model calls. You still pay the base run charge of 1 credit per run." },
|
||||
{ question: "When does threshold billing trigger?", answer: "When on-demand billing is enabled and your unbilled overage reaches $50, Sim automatically bills the full unbilled amount. This spreads large charges throughout the month instead of accumulating one large bill at period end." },
|
||||
]} />
|
||||
|
||||
@@ -156,7 +156,7 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
- **Dropbox** - Dropbox file operations
|
||||
|
||||
<Callout type="info">
|
||||
Files are automatically available to downstream blocks. The execution engine handles all file transfer and format conversion.
|
||||
Files are automatically available to downstream blocks. The engine handles all file transfer and format conversion.
|
||||
</Callout>
|
||||
|
||||
## Best Practices
|
||||
@@ -165,15 +165,15 @@ Use `url` for direct downloads or `base64` for inline processing.
|
||||
|
||||
2. **Check file types** - Ensure the file type matches what the receiving block expects. The Vision block needs images, the File block handles documents.
|
||||
|
||||
3. **Consider file size** - Large files increase execution time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
3. **Consider file size** - Large files increase run time. For very large files, consider using storage blocks (S3, Supabase) for intermediate storage.
|
||||
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during workflow execution is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What is the maximum file size for uploads?", answer: "The maximum file size for files processed during a workflow run is 20 MB. Files exceeding this limit will be rejected with an error indicating the actual file size. For larger files, use storage blocks like S3 or Supabase for intermediate storage." },
|
||||
{ question: "What file input formats are supported via the API?", answer: "When triggering a workflow via API, you can send files as base64-encoded data (using a data URI with the format 'data:{mime};base64,{data}') or as a URL pointing to a publicly accessible file. In both cases, include the file name and MIME type in the request." },
|
||||
{ question: "How are files passed between blocks internally?", answer: "Files are represented as standardized UserFile objects with name, url, base64, type, and size properties. Most blocks accept the full file object and extract what they need automatically, so you typically pass the entire object rather than individual properties." },
|
||||
{ question: "Which blocks can output files?", answer: "Gmail outputs email attachments, Slack outputs downloaded files, TTS generates audio files, Video Generator and Image Generator produce media files. Storage blocks like S3, Supabase, Google Drive, and Dropbox can also retrieve files for use in downstream blocks." },
|
||||
{ question: "Do I need to extract base64 or URL from file objects manually?", answer: "No. Most blocks accept the full file object and handle the format conversion automatically. Simply pass the entire file reference (e.g., <gmail.attachments[0]>) and the receiving block will extract the data it needs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the execution engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
{ question: "How do file fields work in the Start block's input format?", answer: "When you define a field with type 'file[]' in the Start block's input format, the engine automatically processes incoming file data (base64 or URL) and uploads it to storage, converting it into UserFile objects before the workflow runs." },
|
||||
]} />
|
||||
|
||||
@@ -7,10 +7,10 @@ import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Image } from '@/components/ui/image'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows are executed in Sim.
|
||||
Sim's execution engine brings your workflows to life by processing blocks in the correct order, managing data flow, and handling errors gracefully, so you can understand exactly how workflows run in Sim.
|
||||
|
||||
<Callout type="info">
|
||||
Every workflow execution follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
Every workflow run follows a deterministic path based on your block connections and logic, ensuring predictable and reliable results.
|
||||
</Callout>
|
||||
|
||||
## Documentation Overview
|
||||
@@ -22,33 +22,33 @@ Sim's execution engine brings your workflows to life by processing blocks in the
|
||||
</Card>
|
||||
|
||||
<Card title="Logging" href="/execution/logging">
|
||||
Monitor workflow executions with comprehensive logging and real-time visibility
|
||||
Monitor workflow runs with comprehensive logging and real-time visibility
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="Cost Calculation" href="/execution/costs">
|
||||
Understand how workflow execution costs are calculated and optimized
|
||||
Understand how workflow run costs are calculated and optimized
|
||||
</Card>
|
||||
|
||||
|
||||
<Card title="External API" href="/execution/api">
|
||||
Access execution logs and set up webhooks programmatically via REST API
|
||||
Access run logs and set up webhooks programmatically via REST API
|
||||
</Card>
|
||||
</Cards>
|
||||
|
||||
## Key Concepts
|
||||
|
||||
### Topological Execution
|
||||
Blocks execute in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
Blocks run in dependency order, similar to how a spreadsheet recalculates cells. The execution engine automatically determines which blocks can run based on completed dependencies.
|
||||
|
||||
### Path Tracking
|
||||
The engine actively tracks execution paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks execute.
|
||||
The engine actively tracks run paths through your workflow. Router and Condition blocks dynamically update these paths, ensuring only relevant blocks run.
|
||||
|
||||
### Layer-Based Processing
|
||||
Instead of executing blocks one-by-one, the engine identifies layers of blocks that can run in parallel, optimizing performance for complex workflows.
|
||||
|
||||
### Execution Context
|
||||
Each workflow maintains a rich context during execution containing:
|
||||
### Run Context
|
||||
Each workflow maintains a rich context during a run containing:
|
||||
- Block outputs and states
|
||||
- Active execution paths
|
||||
- Active run paths
|
||||
- Loop and parallel iteration tracking
|
||||
- Environment variables
|
||||
- Routing decisions
|
||||
@@ -56,7 +56,7 @@ Each workflow maintains a rich context during execution containing:
|
||||
|
||||
## Deployment Snapshots
|
||||
|
||||
API, Chat, Schedule, and Webhook executions run against the workflow’s active deployment snapshot. Manual runs from the editor execute the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
API, Chat, Schedule, and Webhook runs use the workflow’s active deployment snapshot. Manual runs from the editor use the current draft canvas state, letting you test changes before deploying. Publish a new deployment whenever you change the canvas so every trigger uses the updated version.
|
||||
|
||||
<div className='flex justify-center my-6'>
|
||||
<Image
|
||||
@@ -70,9 +70,9 @@ API, Chat, Schedule, and Webhook executions run against the workflow’s active
|
||||
|
||||
The Deploy modal keeps a full version history—inspect any snapshot, compare it against your draft, and promote or roll back with one click when you need to restore a prior release.
|
||||
|
||||
## Programmatic Execution
|
||||
## Programmatic Access
|
||||
|
||||
Execute workflows from your applications using our official SDKs:
|
||||
Run workflows from your applications using our official SDKs:
|
||||
|
||||
```bash
|
||||
# TypeScript/JavaScript
|
||||
@@ -107,21 +107,21 @@ const result = await client.executeWorkflow('workflow-id', {
|
||||
- Use parallel execution for independent operations
|
||||
- Cache results with Memory blocks when appropriate
|
||||
|
||||
### Monitor Executions
|
||||
### Monitor Runs
|
||||
- Review logs regularly to understand performance patterns
|
||||
- Track costs for AI model usage
|
||||
- Use workflow snapshots to debug issues
|
||||
|
||||
## What's Next?
|
||||
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your executions and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
Start with [Execution Basics](/execution/basics) to understand how workflows run, then explore [Logging](/execution/logging) to monitor your runs and [Cost Calculation](/execution/costs) to optimize your spending.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "What are the execution timeout limits?", answer: "Synchronous executions (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous executions (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "What are the run timeout limits?", answer: "Synchronous runs (API, chat) have a default timeout of 5 minutes on the Free plan and 50 minutes on Pro, Team, and Enterprise plans. Asynchronous runs (schedules, webhooks) allow up to 90 minutes across all plans. These limits are configurable by the platform administrator." },
|
||||
{ question: "How does parallel execution work?", answer: "The engine identifies layers of blocks with no dependencies on each other and runs them concurrently. Within loops and parallel blocks, the engine supports up to 20 parallel branches by default and up to 1,000 loop iterations. Nested subflows (loops inside parallels, or vice versa) are supported up to 10 levels deep." },
|
||||
{ question: "Can I cancel a running execution?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel an execution, the engine checks for cancellation between block executions (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the execution returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based executions (API, chat, schedule, webhook) run against the active snapshot, not your draft canvas. Manual runs from the editor execute the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are execution costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that execution. You can review costs in the execution logs." },
|
||||
{ question: "What happens when a block fails during execution?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the execution with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-execute from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-executes." },
|
||||
{ question: "Can I cancel a running workflow?", answer: "Yes. The engine supports cancellation through an abort signal mechanism. When you cancel a run, the engine checks for cancellation between blocks (at roughly 500ms intervals when using Redis-backed cancellation). Any in-progress blocks complete, and the run returns with a cancelled status." },
|
||||
{ question: "What is a deployment snapshot?", answer: "A deployment snapshot is a frozen copy of your workflow at the time you click Deploy. Trigger-based runs (API, chat, schedule, webhook) use the active snapshot, not your draft canvas. Manual runs from the editor use the current draft canvas state, so you can test changes before deploying. You can view, compare, and roll back snapshots from the Deploy modal." },
|
||||
{ question: "How are run costs calculated?", answer: "Costs are tracked per block based on the AI model used. Each block log records input tokens, output tokens, and the computed cost using the model's pricing. The total workflow cost is the sum of all block-level costs for that run. You can review costs in the run logs." },
|
||||
{ question: "What happens when a block fails during a run?", answer: "When a block throws an error, the engine captures the error message in the block log, finalizes any incomplete logs with timing data, and halts the run with a failure status. If the failing block has an error output handle connected to another block, that error path is followed instead of halting entirely." },
|
||||
{ question: "Can I re-run part of a workflow without starting from scratch?", answer: "Yes. The run-from-block feature lets you select a specific block and re-run from that point. The engine computes which upstream blocks need to be re-run (the dirty set) and preserves cached outputs from blocks that have not changed, so only the affected portion of the workflow re-runs." },
|
||||
]} />
|
||||
|
||||
@@ -6,7 +6,7 @@ import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Image } from '@/components/ui/image'
|
||||
|
||||
Sim provides comprehensive logging for all workflow executions, giving you complete visibility into how your workflows run, what data flows through them, and where issues might occur.
|
||||
Sim provides comprehensive logging for all workflow runs, giving you complete visibility into how your workflows behave, what data flows through them, and where issues might occur.
|
||||
|
||||
## Logging System
|
||||
|
||||
@@ -14,7 +14,7 @@ Sim offers two complementary logging interfaces to match different workflows and
|
||||
|
||||
### Real-Time Console
|
||||
|
||||
During manual or chat workflow execution, logs appear in real-time in the Console panel on the right side of the workflow editor:
|
||||
During manual or chat workflow runs, logs appear in real-time in the Console panel on the right side of the workflow editor:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -27,14 +27,14 @@ During manual or chat workflow execution, logs appear in real-time in the Consol
|
||||
</div>
|
||||
|
||||
The console shows:
|
||||
- Block execution progress with active block highlighting
|
||||
- Block progress with active block highlighting
|
||||
- Real-time outputs as blocks complete
|
||||
- Execution timing for each block
|
||||
- Timing for each block
|
||||
- Success/error status indicators
|
||||
|
||||
### Logs Page
|
||||
|
||||
All workflow executions—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
All workflow runs—whether triggered manually, via API, Chat, Schedule, or Webhook—are logged to the dedicated Logs page:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -72,7 +72,7 @@ View the complete data flow for each block with tabs to switch between:
|
||||
|
||||
<Tabs items={['Output', 'Input']}>
|
||||
<Tab>
|
||||
**Output Tab** shows the block's execution result:
|
||||
**Output Tab** shows the block's result:
|
||||
- Structured data with JSON formatting
|
||||
- Markdown rendering for AI-generated content
|
||||
- Copy button for easy data extraction
|
||||
@@ -87,17 +87,17 @@ View the complete data flow for each block with tabs to switch between:
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
### Execution Timeline
|
||||
### Run Timeline
|
||||
|
||||
For workflow-level logs, view detailed execution metrics:
|
||||
For workflow-level logs, view detailed run metrics:
|
||||
- Start and end timestamps
|
||||
- Total workflow duration
|
||||
- Individual block execution times
|
||||
- Individual block run times
|
||||
- Performance bottleneck identification
|
||||
|
||||
## Workflow Snapshots
|
||||
|
||||
For any logged execution, click "View Snapshot" to see the exact workflow state at execution time:
|
||||
For any logged run, click "View Snapshot" to see the exact workflow state at the time of the run:
|
||||
|
||||
<div className="flex justify-center">
|
||||
<Image
|
||||
@@ -111,12 +111,12 @@ For any logged execution, click "View Snapshot" to see the exact workflow state
|
||||
|
||||
The snapshot provides:
|
||||
- Frozen canvas showing the workflow structure
|
||||
- Block states and connections as they were during execution
|
||||
- Block states and connections as they were during the run
|
||||
- Click any block to see its inputs and outputs
|
||||
- Useful for debugging workflows that have since been modified
|
||||
|
||||
<Callout type="info">
|
||||
Workflow snapshots are only available for executions after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
Workflow snapshots are only available for runs after the enhanced logging system was introduced. Older migrated logs show a "Logged State Not Found" message.
|
||||
</Callout>
|
||||
|
||||
## Log Retention
|
||||
@@ -134,11 +134,11 @@ The snapshot provides:
|
||||
### For Production
|
||||
- Monitor the Logs page regularly for errors or performance issues
|
||||
- Set up filters to focus on specific workflows or time periods
|
||||
- Use live mode during critical deployments to watch executions in real-time
|
||||
- Use live mode during critical deployments to watch runs in real-time
|
||||
|
||||
### For Debugging
|
||||
- Always check the execution timeline to identify slow blocks
|
||||
- Compare inputs between working and failing executions
|
||||
- Always check the run timeline to identify slow blocks
|
||||
- Compare inputs between working and failing runs
|
||||
- Use workflow snapshots to see the exact state when issues occurred
|
||||
|
||||
## Next Steps
|
||||
@@ -150,10 +150,10 @@ The snapshot provides:
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How long are execution logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each execution log?", answer: "Each log entry includes the execution ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), execution data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "How long are run logs retained?", answer: "Free plans retain logs for 7 days — after that, logs are archived to cloud storage and deleted from the database. Pro, Team, and Enterprise plans retain logs indefinitely with no automatic cleanup." },
|
||||
{ question: "What data is captured in each run log?", answer: "Each log entry includes the run ID, workflow ID, trigger type, start and end timestamps, total duration in milliseconds, cost breakdown (total cost, token counts, and per-model breakdowns), run data with trace spans, final output, and any associated files. The log details sidebar lets you inspect block-level inputs and outputs." },
|
||||
{ question: "Are API keys visible in the logs?", answer: "No. API keys and credentials are automatically redacted in the log input tab for security. You can safely inspect block inputs without exposing sensitive values." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at execution time. It lets you see the exact state of the workflow when a particular execution ran, which is useful for debugging workflows that have been modified since the execution." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when executions complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new execution entries appear as they are logged, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
{ question: "What is a workflow snapshot?", answer: "A workflow snapshot is a frozen copy of the workflow's structure (blocks, connections, and configuration) captured at the time of a run. It lets you see the exact state of the workflow when a particular run happened, which is useful for debugging workflows that have been modified since." },
|
||||
{ question: "Can I access logs programmatically?", answer: "Yes. The External API provides endpoints to query logs with filtering by workflow, time range, trigger type, duration, cost, and model. You can also set up webhook, email, or Slack notifications for real-time alerts when runs complete." },
|
||||
{ question: "What does Live mode do on the Logs page?", answer: "Live mode automatically refreshes the Logs page in real-time so new log entries appear as they are recorded, without requiring manual page refreshes. This is useful during deployments or when monitoring active workflows." },
|
||||
]} />
|
||||
@@ -7,7 +7,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="agiloft"
|
||||
color="#263A5C"
|
||||
color="#FFFFFF"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
|
||||
201
apps/docs/content/docs/en/tools/brightdata.mdx
Normal file
201
apps/docs/content/docs/en/tools/brightdata.mdx
Normal file
@@ -0,0 +1,201 @@
|
||||
---
|
||||
title: Bright Data
|
||||
description: Scrape websites, search engines, and extract structured data
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="brightdata"
|
||||
color="#FFFFFF"
|
||||
/>
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate Bright Data into the workflow. Scrape any URL with Web Unlocker, search Google and other engines with SERP API, discover web content ranked by intent, or trigger pre-built scrapers for structured data extraction.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `brightdata_scrape_url`
|
||||
|
||||
Fetch content from any URL using Bright Data Web Unlocker. Bypasses anti-bot protections, CAPTCHAs, and IP blocks automatically.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `zone` | string | Yes | Web Unlocker zone name from your Bright Data dashboard \(e.g., "web_unlocker1"\) |
|
||||
| `url` | string | Yes | The URL to scrape \(e.g., "https://example.com/page"\) |
|
||||
| `format` | string | No | Response format: "raw" for HTML or "json" for parsed content. Defaults to "raw" |
|
||||
| `country` | string | No | Two-letter country code for geo-targeting \(e.g., "us", "gb"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `content` | string | The scraped page content \(HTML or JSON depending on format\) |
|
||||
| `url` | string | The URL that was scraped |
|
||||
| `statusCode` | number | HTTP status code of the response |
|
||||
|
||||
### `brightdata_serp_search`
|
||||
|
||||
Search Google, Bing, DuckDuckGo, or Yandex and get structured search results using Bright Data SERP API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `zone` | string | Yes | SERP API zone name from your Bright Data dashboard \(e.g., "serp_api1"\) |
|
||||
| `query` | string | Yes | The search query \(e.g., "best project management tools"\) |
|
||||
| `searchEngine` | string | No | Search engine to use: "google", "bing", "duckduckgo", or "yandex". Defaults to "google" |
|
||||
| `country` | string | No | Two-letter country code for localized results \(e.g., "us", "gb"\) |
|
||||
| `language` | string | No | Two-letter language code \(e.g., "en", "es"\) |
|
||||
| `numResults` | number | No | Number of results to return \(e.g., 10, 20\). Defaults to 10 |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | array | Array of search results |
|
||||
| ↳ `title` | string | Title of the search result |
|
||||
| ↳ `url` | string | URL of the search result |
|
||||
| ↳ `description` | string | Snippet or description of the result |
|
||||
| ↳ `rank` | number | Position in search results |
|
||||
| `query` | string | The search query that was executed |
|
||||
| `searchEngine` | string | The search engine that was used |
|
||||
|
||||
### `brightdata_discover`
|
||||
|
||||
AI-powered web discovery that finds and ranks results by intent. Returns up to 1,000 results with optional cleaned page content for RAG and verification.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `query` | string | Yes | The search query \(e.g., "competitor pricing changes enterprise plan"\) |
|
||||
| `numResults` | number | No | Number of results to return, up to 1000. Defaults to 10 |
|
||||
| `intent` | string | No | Describes what the agent is trying to accomplish, used to rank results by relevance \(e.g., "find official pricing pages and change notes"\) |
|
||||
| `includeContent` | boolean | No | Whether to include cleaned page content in results |
|
||||
| `format` | string | No | Response format: "json" or "markdown". Defaults to "json" |
|
||||
| `language` | string | No | Search language code \(e.g., "en", "es", "fr"\). Defaults to "en" |
|
||||
| `country` | string | No | Two-letter ISO country code for localized results \(e.g., "us", "gb"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `results` | array | Array of discovered web results ranked by intent relevance |
|
||||
| ↳ `url` | string | URL of the discovered page |
|
||||
| ↳ `title` | string | Page title |
|
||||
| ↳ `description` | string | Page description or snippet |
|
||||
| ↳ `relevanceScore` | number | AI-calculated relevance score for intent-based ranking |
|
||||
| ↳ `content` | string | Cleaned page content in the requested format \(when includeContent is true\) |
|
||||
| `query` | string | The search query that was executed |
|
||||
| `totalResults` | number | Total number of results returned |
|
||||
|
||||
### `brightdata_sync_scrape`
|
||||
|
||||
Scrape URLs synchronously using a Bright Data pre-built scraper and get structured results directly. Supports up to 20 URLs with a 1-minute timeout.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `datasetId` | string | Yes | Dataset scraper ID from your Bright Data dashboard \(e.g., "gd_l1viktl72bvl7bjuj0"\) |
|
||||
| `urls` | string | Yes | JSON array of URL objects to scrape, up to 20 \(e.g., \[\{"url": "https://example.com/product"\}\]\) |
|
||||
| `format` | string | No | Output format: "json", "ndjson", or "csv". Defaults to "json" |
|
||||
| `includeErrors` | boolean | No | Whether to include error reports in results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | array | Array of scraped result objects with fields specific to the dataset scraper used |
|
||||
| `snapshotId` | string | Snapshot ID returned if the request exceeded the 1-minute timeout and switched to async processing |
|
||||
| `isAsync` | boolean | Whether the request fell back to async mode \(true means use snapshot ID to retrieve results\) |
|
||||
|
||||
### `brightdata_scrape_dataset`
|
||||
|
||||
Trigger a Bright Data pre-built scraper to extract structured data from URLs. Supports 660+ scrapers for platforms like Amazon, LinkedIn, Instagram, and more.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `datasetId` | string | Yes | Dataset scraper ID from your Bright Data dashboard \(e.g., "gd_l1viktl72bvl7bjuj0"\) |
|
||||
| `urls` | string | Yes | JSON array of URL objects to scrape \(e.g., \[\{"url": "https://example.com/product"\}\]\) |
|
||||
| `format` | string | No | Output format: "json" or "csv". Defaults to "json" |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID to retrieve results later |
|
||||
| `status` | string | Status of the scraping job \(e.g., "triggered", "running"\) |
|
||||
|
||||
### `brightdata_snapshot_status`
|
||||
|
||||
Check the progress of an async Bright Data scraping job. Returns status: starting, running, ready, or failed.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID returned when the collection was triggered \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID that was queried |
|
||||
| `datasetId` | string | The dataset ID associated with this snapshot |
|
||||
| `status` | string | Current status of the snapshot: "starting", "running", "ready", or "failed" |
|
||||
|
||||
### `brightdata_download_snapshot`
|
||||
|
||||
Download the results of a completed Bright Data scraping job using its snapshot ID. The snapshot must have ready status.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID returned when the collection was triggered \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
| `format` | string | No | Output format: "json", "ndjson", "jsonl", or "csv". Defaults to "json" |
|
||||
| `compress` | boolean | No | Whether to compress the results |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `data` | array | Array of scraped result records |
|
||||
| `format` | string | The content type of the downloaded data |
|
||||
| `snapshotId` | string | The snapshot ID that was downloaded |
|
||||
|
||||
### `brightdata_cancel_snapshot`
|
||||
|
||||
Cancel an active Bright Data scraping job using its snapshot ID. Terminates data collection in progress.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Bright Data API token |
|
||||
| `snapshotId` | string | Yes | The snapshot ID of the collection to cancel \(e.g., "s_m4x7enmven8djfqak"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `snapshotId` | string | The snapshot ID that was cancelled |
|
||||
| `cancelled` | boolean | Whether the cancellation was successful |
|
||||
|
||||
|
||||
@@ -251,7 +251,7 @@ Update a Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `issueKey` | string | Yes | Jira issue key to update \(e.g., PROJ-123\) |
|
||||
| `summary` | string | No | New summary for the issue |
|
||||
| `description` | string | No | New description for the issue |
|
||||
| `description` | string | No | New description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | New priority ID or name for the issue \(e.g., "High"\) |
|
||||
| `assignee` | string | No | New assignee account ID for the issue |
|
||||
| `labels` | json | No | Labels to set on the issue \(array of label name strings\) |
|
||||
@@ -284,7 +284,7 @@ Create a new Jira issue
|
||||
| `domain` | string | Yes | Your Jira domain \(e.g., yourcompany.atlassian.net\) |
|
||||
| `projectId` | string | Yes | Jira project key \(e.g., PROJ\) |
|
||||
| `summary` | string | Yes | Summary for the issue |
|
||||
| `description` | string | No | Description for the issue |
|
||||
| `description` | string | No | Description for the issue. Accepts plain text \(auto-wrapped in ADF\) or a raw ADF document object |
|
||||
| `priority` | string | No | Priority ID or name for the issue \(e.g., "10000" or "High"\) |
|
||||
| `assignee` | string | No | Assignee account ID for the issue |
|
||||
| `cloudId` | string | No | Jira Cloud ID for the instance. If not provided, it will be fetched using the domain. |
|
||||
|
||||
@@ -18,6 +18,7 @@
|
||||
"attio",
|
||||
"box",
|
||||
"brandfetch",
|
||||
"brightdata",
|
||||
"browser_use",
|
||||
"calcom",
|
||||
"calendly",
|
||||
|
||||
@@ -45,6 +45,7 @@ Read data from a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to read from \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to read from. Accepts "SheetName!A1:B2" for explicit ranges or just "SheetName" to read the used range of that sheet. If omitted, reads the used range of the first sheet. |
|
||||
|
||||
#### Output
|
||||
@@ -67,6 +68,7 @@ Write data to a specific sheet in a Microsoft Excel spreadsheet
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `spreadsheetId` | string | Yes | The ID of the spreadsheet/workbook to write to \(e.g., "01ABC123DEF456"\) |
|
||||
| `driveId` | string | No | The ID of the drive containing the spreadsheet. Required for SharePoint files. If omitted, uses personal OneDrive. |
|
||||
| `range` | string | No | The range of cells to write to \(e.g., "Sheet1!A1:B2"\) |
|
||||
| `values` | array | Yes | The data to write as a 2D array \(e.g., \[\["Name", "Age"\], \["Alice", 30\]\]\) or array of objects |
|
||||
| `valueInputOption` | string | No | The format of the data to write |
|
||||
|
||||
@@ -29,7 +29,7 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
Receive external webhook payloads
|
||||
</Card>
|
||||
<Card title="Schedule" href="/triggers/schedule">
|
||||
Cron or interval based execution
|
||||
Cron or interval based runs
|
||||
</Card>
|
||||
<Card title="RSS Feed" href="/triggers/rss">
|
||||
Monitor RSS and Atom feeds for new content
|
||||
@@ -59,17 +59,17 @@ Use the Start block for everything originating from the editor, deploy-to-API, o
|
||||
|
||||
> Deployments power every trigger. Update the workflow, redeploy, and all trigger entry points pick up the new snapshot. Learn more in [Execution → Deployment Snapshots](/execution).
|
||||
|
||||
## Manual Execution Priority
|
||||
## Manual Run Priority
|
||||
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to execute based on the following priority order:
|
||||
When you click **Run** in the editor, Sim automatically selects which trigger to run based on the following priority order:
|
||||
|
||||
1. **Start Block** (highest priority)
|
||||
2. **Schedule Triggers**
|
||||
3. **External Triggers** (webhooks, integrations like Slack, Gmail, Airtable, etc.)
|
||||
|
||||
If your workflow has multiple triggers, the highest priority trigger will be executed. For example, if you have both a Start block and a Webhook trigger, clicking Run will execute the Start block.
|
||||
If your workflow has multiple triggers, the highest priority trigger will be used. For example, if you have both a Start block and a Webhook trigger, clicking Run will use the Start block.
|
||||
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are executed manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
**External triggers with mock payloads**: When external triggers (webhooks and integrations) are run manually, Sim automatically generates mock payloads based on the trigger's expected data structure. This ensures downstream blocks can resolve variables correctly during testing.
|
||||
|
||||
## Email Polling Groups
|
||||
|
||||
@@ -94,10 +94,10 @@ Invitees receive an email with a link to connect their account. Once connected,
|
||||
When configuring an email trigger, select your polling group from the credentials dropdown instead of an individual account. The system creates webhooks for each member and routes all emails through your workflow.
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim executes the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "Can I have multiple triggers on the same workflow?", answer: "Yes, a workflow can have multiple triggers (for example, a Start block and a Webhook trigger). When you click Run in the editor, Sim uses the highest-priority trigger: Start block first, then Schedule, then external triggers like webhooks. Each trigger type can also fire independently when its event occurs." },
|
||||
{ question: "How do I secure my webhook endpoint?", answer: "The Generic Webhook trigger supports authentication. Enable the Require Authentication toggle, set an auth token, and optionally specify a custom header name. Incoming requests must include the token as a Bearer token in the Authorization header (or in your custom header). Requests without a valid token are rejected." },
|
||||
{ question: "What happens when I test an external trigger manually?", answer: "When you click Run on a workflow with an external trigger (webhook, Slack, Gmail, etc.), Sim generates a mock payload based on the trigger's expected data structure. This lets downstream blocks resolve their variable references correctly so you can test the full workflow without waiting for a real event." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based executions (API, chat, schedule, webhook) run against the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "Do triggers use the draft canvas or the deployed version?", answer: "All trigger-based runs (API, chat, schedule, webhook) use the active deployment snapshot, not your draft canvas. After making changes, you need to redeploy for triggers to pick up the updated workflow version." },
|
||||
{ question: "What integrations are available as triggers?", answer: "Sim supports a wide range of trigger integrations including GitHub (push, PR, issues), Slack, Gmail, Outlook, Linear, Jira, HubSpot, Stripe, Airtable, Calendly, Typeform, Telegram, WhatsApp, Microsoft Teams, RSS feeds, and more. Each integration provides event-specific triggers like issue_created or email_received." },
|
||||
{ question: "How does the Schedule trigger work?", answer: "The Schedule trigger runs your workflow on a timer using cron expressions or interval-based configuration. The schedule is managed within the schedule block settings. Like all triggers, scheduled runs execute the active deployment snapshot, so make sure to redeploy after making workflow changes." },
|
||||
]} />
|
||||
|
||||
@@ -51,9 +51,9 @@ RSS triggers only fire for items published after you save the trigger. Existing
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "How often is the RSS feed checked for new items?", answer: "The feed is polled every minute. On each poll, the service fetches the feed, compares items against the last checked timestamp and a list of previously seen GUIDs, and triggers your workflow only for genuinely new items." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow executions for the same item." },
|
||||
{ question: "How does the poller avoid processing the same item twice?", answer: "The service tracks up to 100 recent item GUIDs and the last checked timestamp. An item is considered new only if its GUID has not been seen before and its publication date is after the last checked timestamp. Additionally, an idempotency layer prevents duplicate workflow runs for the same item." },
|
||||
{ question: "Is there a limit on how many new items are processed per poll?", answer: "Yes. Each polling cycle processes a maximum of 25 new items, sorted by publication date (newest first). If a feed publishes more than 25 items between polls, only the 25 most recent are processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered execution receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What output fields are available from the RSS trigger?", answer: "Each triggered run receives: title, link, and pubDate as top-level convenience fields, plus a full item object containing all fields (including guid, summary, content, contentSnippet, author, categories, enclosure, and isoDate), a feed object with the feed's title, link, and description, and a timestamp of when the event was processed." },
|
||||
{ question: "What happens if the RSS feed is temporarily unreachable?", answer: "A failed fetch increments the webhook's consecutive failure counter. After 100 consecutive failures, the RSS trigger is automatically disabled. On any successful poll, the counter resets to zero." },
|
||||
{ question: "Does the RSS trigger support Atom feeds?", answer: "Yes. The underlying parser (rss-parser) supports both RSS and Atom feed formats. You can use the URL of either format in the Feed URL field." },
|
||||
]} />
|
||||
|
||||
@@ -79,10 +79,10 @@ Schedule blocks cannot receive incoming connections and serve as workflow entry
|
||||
|
||||
<FAQ items={[
|
||||
{ question: "Do I need to deploy my workflow for the schedule to start?", answer: "Yes. Schedules are created in the database only when you deploy the workflow. Undeploying removes the schedule, and redeploying recreates it with the current configuration." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful execution." },
|
||||
{ question: "What exactly happens after 100 consecutive failures?", answer: "After 100 consecutive failures, the schedule is automatically set to a disabled status to prevent runaway errors. A warning badge appears on the schedule block in the editor. You can click the badge to reactivate it. The failure counter resets to zero on any successful run." },
|
||||
{ question: "Does the schedule support timezones?", answer: "Yes. The schedule configuration includes a timezone setting. Cron expressions and simple intervals are evaluated relative to the configured timezone, which defaults to UTC if not specified." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during execution, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "What happens if my scheduled workflow is rate-limited?", answer: "If a rate limit (HTTP 429) is encountered during a run, the schedule automatically retries after a 5-minute delay rather than counting it as a failure." },
|
||||
{ question: "Can I have multiple schedule blocks in one workflow?", answer: "Yes. The deployment process finds all schedule blocks in the workflow and creates a separate schedule record for each one. Each schedule operates independently with its own cron expression and failure counter." },
|
||||
{ question: "What happens if the workflow is undeployed while a schedule execution is in progress?", answer: "The currently running execution will complete, but no new executions will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before executing." },
|
||||
{ question: "What happens if the workflow is undeployed while a scheduled run is in progress?", answer: "The currently running workflow will complete, but no new runs will be triggered. When the schedule next tries to fire, it checks that the workflow is still deployed and the schedule record still exists before running." },
|
||||
]} />
|
||||
|
||||
|
||||
@@ -19,12 +19,12 @@ The Start block is the default trigger for workflows built in Sim. It collects s
|
||||
</div>
|
||||
|
||||
<Callout type="info">
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven execution.
|
||||
The Start block sits in the start slot when you create a workflow. Keep it there when you want the same entry point to serve editor runs, deploy-to-API requests, and chat sessions. Swap it with Webhook or Schedule triggers when you only need event-driven runs.
|
||||
</Callout>
|
||||
|
||||
## Fields exposed by Start
|
||||
|
||||
The Start block emits different data depending on the execution surface:
|
||||
The Start block emits different data depending on the run surface:
|
||||
|
||||
- **Input Format fields** — Every field you add becomes available as <code><start.fieldName></code>. For example, a `customerId` field shows up as <code><start.customerId></code> in downstream blocks and templates.
|
||||
- **Chat-only fields** — When the workflow runs from the chat side panel or a deployed chat experience, Sim also provides <code><start.input></code> (latest user message), <code><start.conversationId></code> (active session id), and <code><start.files></code> (chat attachments).
|
||||
@@ -33,11 +33,11 @@ Keep Input Format fields scoped to the names you expect to reference later—tho
|
||||
|
||||
## Configure the Input Format
|
||||
|
||||
Use the Input Format sub-block to define the schema that applies across execution modes:
|
||||
Use the Input Format sub-block to define the schema that applies across run modes:
|
||||
|
||||
1. Add a field for each value you want to collect.
|
||||
2. Choose a type (`string`, `number`, `boolean`, `object`, `array`, or `files`). File fields accept uploads from chat and API callers.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed executions.
|
||||
3. Provide default values when you want the manual run modal to populate test data automatically. These defaults are ignored for deployed runs.
|
||||
4. Reorder fields to control how they appear in the editor form.
|
||||
|
||||
Reference structured values downstream with expressions such as <code><start.customerId></code> depending on the block you connect.
|
||||
@@ -53,7 +53,7 @@ Reference structured values downstream with expressions such as <code><start.
|
||||
tools or storage steps.
|
||||
</Tab>
|
||||
<Tab>
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the execution file upload endpoint before invoking the workflow.
|
||||
Deploying to API turns the Input Format into a JSON contract for clients. Each field becomes part of the request body, and Sim coerces primitive types on ingestion. File fields expect objects that reference uploaded files; use the file upload endpoint before invoking the workflow.
|
||||
|
||||
API callers can include additional optional properties. They are preserved
|
||||
inside <code><start.fieldName></code> outputs so you can experiment
|
||||
|
||||
@@ -8,7 +8,7 @@ import { Image } from '@/components/ui/image'
|
||||
import { Video } from '@/components/ui/video'
|
||||
import { FAQ } from '@/components/ui/faq'
|
||||
|
||||
Webhooks allow external services to trigger workflow execution by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
Webhooks allow external services to trigger workflow runs by sending HTTP requests to your workflow. Sim supports two approaches for webhook-based triggers.
|
||||
|
||||
## Generic Webhook Trigger
|
||||
|
||||
@@ -30,7 +30,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
2. **Configure Payload** - Set up the expected payload structure (optional)
|
||||
3. **Get Webhook URL** - Copy the automatically generated unique endpoint
|
||||
4. **External Integration** - Configure your external service to send POST requests to this URL
|
||||
5. **Workflow Execution** - Every request to the webhook URL triggers the workflow
|
||||
5. **Workflow Run** - Every request to the webhook URL triggers the workflow
|
||||
|
||||
### Features
|
||||
|
||||
@@ -38,7 +38,7 @@ The Generic Webhook block creates a flexible endpoint that can receive any paylo
|
||||
- **Automatic Parsing**: Webhook data is automatically parsed and available to subsequent blocks
|
||||
- **Authentication**: Optional bearer token or custom header authentication
|
||||
- **Rate Limiting**: Built-in protection against abuse
|
||||
- **Deduplication**: Prevents duplicate executions from repeated requests
|
||||
- **Deduplication**: Prevents duplicate runs from repeated requests
|
||||
|
||||
<Callout type="info">
|
||||
The Generic Webhook trigger fires every time the webhook URL receives a request, making it perfect for real-time integrations.
|
||||
@@ -58,7 +58,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
2. **Enable Trigger Mode** - Toggle "Use as Trigger" in the block settings
|
||||
3. **Configure Service** - Set up authentication and event filters specific to that service
|
||||
4. **Webhook Registration** - The service automatically registers the webhook with the external platform
|
||||
5. **Event-Based Execution** - Workflow triggers only for specific events from that service
|
||||
5. **Event-Based Runs** - Workflow triggers only for specific events from that service
|
||||
|
||||
### When to Use Each Approach
|
||||
|
||||
@@ -120,7 +120,7 @@ Alternatively, you can use specific service blocks (like Slack, GitHub, etc.) in
|
||||
### Testing Webhooks
|
||||
|
||||
1. Use tools like Postman or curl to test your webhook endpoints
|
||||
2. Check workflow execution logs for debugging
|
||||
2. Check workflow run logs for debugging
|
||||
3. Verify payload structure matches your expectations
|
||||
4. Test authentication and error scenarios
|
||||
|
||||
@@ -153,8 +153,8 @@ Always validate and sanitize incoming webhook data before processing it in your
|
||||
{ question: "What HTTP methods does the Generic Webhook endpoint accept?", answer: "The webhook endpoint handles POST requests for triggering workflows. GET requests are only used for provider-specific verification challenges (such as Microsoft Graph or WhatsApp verification). Other methods return a 405 Method Not Allowed response." },
|
||||
{ question: "How do I authenticate webhook requests?", answer: "Enable the Require Authentication toggle in the webhook configuration, then set an Authentication Token. Callers can send the token as a Bearer token in the Authorization header, or you can specify a custom header name (e.g., X-Secret-Key) and the token will be matched against that header instead." },
|
||||
{ question: "Can I define the expected payload structure for a webhook?", answer: "Yes. The Generic Webhook block includes an Input Format field where you can define the expected JSON schema. This is optional but helps document the expected structure. You can also use type \"file[]\" for file upload fields." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate executions from repeated requests with the same payload." },
|
||||
{ question: "Does the webhook have deduplication built in?", answer: "Yes. The webhook processing pipeline includes idempotency checks to prevent duplicate runs from repeated requests with the same payload." },
|
||||
{ question: "What data from the webhook request is available in my workflow?", answer: "All request data including headers, body, and query parameters is parsed and made available to subsequent blocks. Common fields like event, id, and data are automatically extracted from the payload when present." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering execution. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the execution logs for error details." },
|
||||
{ question: "Do I need to deploy my workflow for the webhook URL to work?", answer: "Yes. The webhook endpoint checks that the associated workflow is deployed before triggering a run. If the workflow is not deployed, the webhook returns a not-found response." },
|
||||
{ question: "Does the webhook auto-disable after repeated failures?", answer: "No. Unlike polling-based triggers (RSS, Gmail, IMAP), push-based generic webhooks do not auto-disable after consecutive failures. Each incoming request is processed independently. If your workflow consistently fails, check the run logs for error details." },
|
||||
]} />
|
||||
|
||||
1
apps/docs/lib/urls.ts
Normal file
1
apps/docs/lib/urls.ts
Normal file
@@ -0,0 +1 @@
|
||||
export const DOCS_BASE_URL = process.env.NEXT_PUBLIC_DOCS_URL ?? 'https://docs.sim.ai'
|
||||
@@ -9,6 +9,8 @@ import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { BackLink } from '@/app/(landing)/blog/[slug]/back-link'
|
||||
import { ShareButton } from '@/app/(landing)/blog/[slug]/share-button'
|
||||
|
||||
export const dynamicParams = false
|
||||
|
||||
export async function generateStaticParams() {
|
||||
const posts = await getAllPostMeta()
|
||||
return posts.map((p) => ({ slug: p.slug }))
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
@@ -17,11 +18,11 @@ export async function generateMetadata({
|
||||
return {
|
||||
title: `${name} — Sim Blog`,
|
||||
description: `Read articles by ${name} on the Sim blog.`,
|
||||
alternates: { canonical: `https://sim.ai/blog/authors/${id}` },
|
||||
alternates: { canonical: `${SITE_URL}/blog/authors/${id}` },
|
||||
openGraph: {
|
||||
title: `${name} — Sim Blog`,
|
||||
description: `Read articles by ${name} on the Sim blog.`,
|
||||
url: `https://sim.ai/blog/authors/${id}`,
|
||||
url: `${SITE_URL}/blog/authors/${id}`,
|
||||
siteName: 'Sim',
|
||||
type: 'profile',
|
||||
...(author?.avatarUrl
|
||||
@@ -55,25 +56,25 @@ export default async function AuthorPage({ params }: { params: Promise<{ id: str
|
||||
{
|
||||
'@type': 'Person',
|
||||
name: author.name,
|
||||
url: `https://sim.ai/blog/authors/${author.id}`,
|
||||
url: `${SITE_URL}/blog/authors/${author.id}`,
|
||||
sameAs: author.url ? [author.url] : [],
|
||||
image: author.avatarUrl,
|
||||
worksFor: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
},
|
||||
},
|
||||
{
|
||||
'@type': 'BreadcrumbList',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` },
|
||||
{
|
||||
'@type': 'ListItem',
|
||||
position: 3,
|
||||
name: author.name,
|
||||
item: `https://sim.ai/blog/authors/${author.id}`,
|
||||
item: `${SITE_URL}/blog/authors/${author.id}`,
|
||||
},
|
||||
],
|
||||
},
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
@@ -8,10 +9,10 @@ export default async function StudioLayout({ children }: { children: React.React
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents.',
|
||||
logo: 'https://sim.ai/logo/primary/small.png',
|
||||
logo: `${SITE_URL}/logo/primary/small.png`,
|
||||
sameAs: [
|
||||
'https://x.com/simdotai',
|
||||
'https://github.com/simstudioai/sim',
|
||||
@@ -23,7 +24,7 @@ export default async function StudioLayout({ children }: { children: React.React
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'WebSite',
|
||||
name: 'Sim',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
}
|
||||
|
||||
return (
|
||||
|
||||
@@ -4,6 +4,7 @@ import Link from 'next/link'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { buildCollectionPageJsonLd } from '@/lib/blog/seo'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export async function generateMetadata({
|
||||
searchParams,
|
||||
@@ -26,7 +27,7 @@ export async function generateMetadata({
|
||||
if (tag) canonicalParams.set('tag', tag)
|
||||
if (pageNum > 1) canonicalParams.set('page', String(pageNum))
|
||||
const qs = canonicalParams.toString()
|
||||
const canonical = `https://sim.ai/blog${qs ? `?${qs}` : ''}`
|
||||
const canonical = `${SITE_URL}/blog${qs ? `?${qs}` : ''}`
|
||||
|
||||
return {
|
||||
title,
|
||||
@@ -41,7 +42,7 @@ export async function generateMetadata({
|
||||
type: 'website',
|
||||
images: [
|
||||
{
|
||||
url: 'https://sim.ai/logo/primary/medium.png',
|
||||
url: `${SITE_URL}/logo/primary/medium.png`,
|
||||
width: 1200,
|
||||
height: 630,
|
||||
alt: 'Sim Blog',
|
||||
|
||||
@@ -1,12 +1,13 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const posts = await getAllPostMeta()
|
||||
const items = posts.slice(0, 50)
|
||||
const site = 'https://sim.ai'
|
||||
const site = SITE_URL
|
||||
const lastBuildDate =
|
||||
items.length > 0 ? new Date(items[0].date).toUTCString() : new Date().toUTCString()
|
||||
|
||||
|
||||
@@ -1,11 +1,12 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
export async function GET() {
|
||||
const posts = await getAllPostMeta()
|
||||
const base = 'https://sim.ai'
|
||||
const base = SITE_URL
|
||||
const xml = `<?xml version="1.0" encoding="UTF-8"?>
|
||||
<urlset xmlns="http://www.sitemaps.org/schemas/sitemap/0.9" xmlns:image="http://www.google.com/schemas/sitemap-image/1.1">
|
||||
${posts
|
||||
|
||||
@@ -1,15 +1,16 @@
|
||||
import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { getAllTags } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: 'Tags',
|
||||
description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.',
|
||||
alternates: { canonical: 'https://sim.ai/blog/tags' },
|
||||
alternates: { canonical: `${SITE_URL}/blog/tags` },
|
||||
openGraph: {
|
||||
title: 'Blog Tags | Sim',
|
||||
description: 'Browse Sim blog posts by topic — AI agents, workflows, integrations, and more.',
|
||||
url: 'https://sim.ai/blog/tags',
|
||||
url: `${SITE_URL}/blog/tags`,
|
||||
siteName: 'Sim',
|
||||
locale: 'en_US',
|
||||
type: 'website',
|
||||
@@ -26,9 +27,9 @@ const breadcrumbJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'BreadcrumbList',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
|
||||
{ '@type': 'ListItem', position: 3, name: 'Tags', item: 'https://sim.ai/blog/tags' },
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL },
|
||||
{ '@type': 'ListItem', position: 2, name: 'Blog', item: `${SITE_URL}/blog` },
|
||||
{ '@type': 'ListItem', position: 3, name: 'Tags', item: `${SITE_URL}/blog/tags` },
|
||||
],
|
||||
}
|
||||
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
'use client'
|
||||
|
||||
import { useCallback, useEffect, useRef, useState } from 'react'
|
||||
import dynamic from 'next/dynamic'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
interface DotGridProps {
|
||||
className?: string
|
||||
cols: number
|
||||
|
||||
@@ -2,12 +2,17 @@
|
||||
|
||||
import { useRef, useState } from 'react'
|
||||
import { type MotionValue, motion, useScroll, useTransform } from 'framer-motion'
|
||||
import dynamic from 'next/dynamic'
|
||||
import Image from 'next/image'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { FeaturesPreview } from '@/app/(landing)/components/features/components/features-preview'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
function hexToRgba(hex: string, alpha: number): string {
|
||||
const r = Number.parseInt(hex.slice(1, 3), 16)
|
||||
const g = Number.parseInt(hex.slice(3, 5), 16)
|
||||
|
||||
@@ -2,13 +2,18 @@
|
||||
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import { ArrowUp } from 'lucide-react'
|
||||
import dynamic from 'next/dynamic'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { captureClientEvent } from '@/lib/posthog/client'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { useLandingSubmit } from '@/app/(landing)/components/landing-preview/components/landing-preview-panel/landing-preview-panel'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
import { useAnimatedPlaceholder } from '@/hooks/use-animated-placeholder'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
const MAX_HEIGHT = 120
|
||||
|
||||
const CTA_BUTTON =
|
||||
|
||||
@@ -38,7 +38,7 @@ const BLOCK_LINKS: FooterItem[] = [
|
||||
{ label: 'Router', href: 'https://docs.sim.ai/blocks/router', external: true },
|
||||
{ label: 'Function', href: 'https://docs.sim.ai/blocks/function', external: true },
|
||||
{ label: 'Condition', href: 'https://docs.sim.ai/blocks/condition', external: true },
|
||||
{ label: 'API', href: 'https://docs.sim.ai/blocks/api', external: true },
|
||||
{ label: 'API Block', href: 'https://docs.sim.ai/blocks/api', external: true },
|
||||
{ label: 'Workflow', href: 'https://docs.sim.ai/blocks/workflow', external: true },
|
||||
{ label: 'Parallel', href: 'https://docs.sim.ai/blocks/parallel', external: true },
|
||||
{ label: 'Guardrails', href: 'https://docs.sim.ai/blocks/guardrails', external: true },
|
||||
@@ -194,7 +194,7 @@ export default function Footer({ hideCTA }: FooterProps) {
|
||||
<Link href='/' aria-label='Sim home'>
|
||||
<Image
|
||||
src='/logo/sim-landing.svg'
|
||||
alt='Sim'
|
||||
alt=''
|
||||
width={85}
|
||||
height={26}
|
||||
className='h-[26.4px] w-auto'
|
||||
|
||||
@@ -2,10 +2,21 @@
|
||||
|
||||
import dynamic from 'next/dynamic'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { DemoRequestModal } from '@/app/(landing)/components/demo-request/demo-request-modal'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
const DemoRequestModal = dynamic(
|
||||
() =>
|
||||
import('@/app/(landing)/components/demo-request/demo-request-modal').then(
|
||||
(m) => m.DemoRequestModal
|
||||
),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
const LandingPreview = dynamic(
|
||||
() =>
|
||||
import('@/app/(landing)/components/landing-preview/landing-preview').then(
|
||||
|
||||
@@ -334,6 +334,7 @@ export const LandingPreviewHome = memo(function LandingPreviewHome({
|
||||
type='button'
|
||||
onClick={handleSubmit}
|
||||
disabled={isEmpty}
|
||||
aria-label='Submit message'
|
||||
className='flex h-[28px] w-[28px] items-center justify-center rounded-full border-0 p-0 transition-colors'
|
||||
style={{
|
||||
background: isEmpty ? '#808080' : '#e0e0e0',
|
||||
|
||||
@@ -3,13 +3,13 @@
|
||||
import { memo, useCallback, useEffect, useRef, useState } from 'react'
|
||||
import { AnimatePresence, motion } from 'framer-motion'
|
||||
import { ArrowUp } from 'lucide-react'
|
||||
import dynamic from 'next/dynamic'
|
||||
import { useRouter } from 'next/navigation'
|
||||
import { createPortal } from 'react-dom'
|
||||
import { Blimp, BubbleChatPreview, ChevronDown, MoreHorizontal, Play } from '@/components/emcn'
|
||||
import { AgentIcon, HubspotIcon, OpenAIIcon, SalesforceIcon } from '@/components/icons'
|
||||
import { LandingPromptStorage } from '@/lib/core/utils/browser-storage'
|
||||
import { captureClientEvent } from '@/lib/posthog/client'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import {
|
||||
EASE_OUT,
|
||||
type EditorPromptData,
|
||||
@@ -21,6 +21,11 @@ import {
|
||||
} from '@/app/(landing)/components/landing-preview/components/landing-preview-workflow/workflow-data'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
type PanelTab = 'copilot' | 'editor'
|
||||
|
||||
const EDITOR_BLOCK_ICONS: Record<string, React.ComponentType<{ className?: string }>> = {
|
||||
|
||||
@@ -1,13 +1,13 @@
|
||||
'use client'
|
||||
|
||||
import { useCallback, useEffect, useRef, useState, useSyncExternalStore } from 'react'
|
||||
import { useCallback, useContext, useEffect, useRef, useState, useSyncExternalStore } from 'react'
|
||||
import dynamic from 'next/dynamic'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { useSearchParams } from 'next/navigation'
|
||||
import { GithubOutlineIcon } from '@/components/icons'
|
||||
import { useSession } from '@/lib/auth/auth-client'
|
||||
import { cn } from '@/lib/core/utils/cn'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { SessionContext } from '@/app/_shell/providers/session-provider'
|
||||
import {
|
||||
BlogDropdown,
|
||||
type NavBlogPost,
|
||||
@@ -17,6 +17,11 @@ import { GitHubStars } from '@/app/(landing)/components/navbar/components/github
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
import { getBrandConfig } from '@/ee/whitelabeling'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
type DropdownId = 'docs' | 'blog' | null
|
||||
|
||||
interface NavLink {
|
||||
@@ -48,7 +53,9 @@ interface NavbarProps {
|
||||
export default function Navbar({ logoOnly = false, blogPosts = [] }: NavbarProps) {
|
||||
const brand = getBrandConfig()
|
||||
const searchParams = useSearchParams()
|
||||
const { data: session, isPending: isSessionPending } = useSession()
|
||||
const sessionCtx = useContext(SessionContext)
|
||||
const session = sessionCtx?.data ?? null
|
||||
const isSessionPending = sessionCtx?.isPending ?? true
|
||||
const isAuthenticated = Boolean(session?.user?.id)
|
||||
const isBrowsingHome = searchParams.has('home')
|
||||
const useHomeLinks = isAuthenticated || isBrowsingHome
|
||||
@@ -125,7 +132,7 @@ export default function Navbar({ logoOnly = false, blogPosts = [] }: NavbarProps
|
||||
) : (
|
||||
<Image
|
||||
src='/logo/sim-landing.svg'
|
||||
alt='Sim'
|
||||
alt=''
|
||||
width={71}
|
||||
height={22}
|
||||
className='h-[22px] w-auto'
|
||||
|
||||
@@ -1,10 +1,22 @@
|
||||
'use client'
|
||||
|
||||
import dynamic from 'next/dynamic'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { AuthModal } from '@/app/(landing)/components/auth-modal/auth-modal'
|
||||
import { DemoRequestModal } from '@/app/(landing)/components/demo-request/demo-request-modal'
|
||||
import { trackLandingCta } from '@/app/(landing)/landing-analytics'
|
||||
|
||||
const AuthModal = dynamic(
|
||||
() => import('@/app/(landing)/components/auth-modal/auth-modal').then((m) => m.AuthModal),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
const DemoRequestModal = dynamic(
|
||||
() =>
|
||||
import('@/app/(landing)/components/demo-request/demo-request-modal').then(
|
||||
(m) => m.DemoRequestModal
|
||||
),
|
||||
{ loading: () => null }
|
||||
)
|
||||
|
||||
interface PricingTier {
|
||||
id: string
|
||||
name: string
|
||||
|
||||
@@ -1,3 +1,5 @@
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
/**
|
||||
* JSON-LD structured data for the landing page.
|
||||
*
|
||||
@@ -23,22 +25,22 @@ export default function StructuredData() {
|
||||
'@graph': [
|
||||
{
|
||||
'@type': 'Organization',
|
||||
'@id': 'https://sim.ai/#organization',
|
||||
'@id': `${SITE_URL}/#organization`,
|
||||
name: 'Sim',
|
||||
alternateName: 'Sim Studio',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.',
|
||||
url: 'https://sim.ai',
|
||||
url: SITE_URL,
|
||||
logo: {
|
||||
'@type': 'ImageObject',
|
||||
'@id': 'https://sim.ai/#logo',
|
||||
url: 'https://sim.ai/logo/b%26w/text/b%26w.svg',
|
||||
contentUrl: 'https://sim.ai/logo/b%26w/text/b%26w.svg',
|
||||
'@id': `${SITE_URL}/#logo`,
|
||||
url: `${SITE_URL}/logo/b%26w/text/b%26w.svg`,
|
||||
contentUrl: `${SITE_URL}/logo/b%26w/text/b%26w.svg`,
|
||||
width: 49.78314,
|
||||
height: 24.276,
|
||||
caption: 'Sim Logo',
|
||||
},
|
||||
image: { '@id': 'https://sim.ai/#logo' },
|
||||
image: { '@id': `${SITE_URL}/#logo` },
|
||||
sameAs: [
|
||||
'https://x.com/simdotai',
|
||||
'https://github.com/simstudioai/sim',
|
||||
@@ -53,44 +55,42 @@ export default function StructuredData() {
|
||||
},
|
||||
{
|
||||
'@type': 'WebSite',
|
||||
'@id': 'https://sim.ai/#website',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#website`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM. Join 100,000+ builders.',
|
||||
publisher: { '@id': 'https://sim.ai/#organization' },
|
||||
publisher: { '@id': `${SITE_URL}/#organization` },
|
||||
inLanguage: 'en-US',
|
||||
},
|
||||
{
|
||||
'@type': 'WebPage',
|
||||
'@id': 'https://sim.ai/#webpage',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#webpage`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
isPartOf: { '@id': 'https://sim.ai/#website' },
|
||||
about: { '@id': 'https://sim.ai/#software' },
|
||||
isPartOf: { '@id': `${SITE_URL}/#website` },
|
||||
about: { '@id': `${SITE_URL}/#software` },
|
||||
datePublished: '2024-01-01T00:00:00+00:00',
|
||||
dateModified: new Date().toISOString(),
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work.',
|
||||
breadcrumb: { '@id': 'https://sim.ai/#breadcrumb' },
|
||||
breadcrumb: { '@id': `${SITE_URL}/#breadcrumb` },
|
||||
inLanguage: 'en-US',
|
||||
speakable: {
|
||||
'@type': 'SpeakableSpecification',
|
||||
cssSelector: ['#hero-heading', '[id="hero"] p'],
|
||||
},
|
||||
potentialAction: [{ '@type': 'ReadAction', target: ['https://sim.ai'] }],
|
||||
potentialAction: [{ '@type': 'ReadAction', target: [SITE_URL] }],
|
||||
},
|
||||
{
|
||||
'@type': 'BreadcrumbList',
|
||||
'@id': 'https://sim.ai/#breadcrumb',
|
||||
itemListElement: [
|
||||
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
|
||||
],
|
||||
'@id': `${SITE_URL}/#breadcrumb`,
|
||||
itemListElement: [{ '@type': 'ListItem', position: 1, name: 'Home', item: SITE_URL }],
|
||||
},
|
||||
{
|
||||
'@type': 'WebApplication',
|
||||
'@id': 'https://sim.ai/#software',
|
||||
url: 'https://sim.ai',
|
||||
'@id': `${SITE_URL}/#software`,
|
||||
url: SITE_URL,
|
||||
name: 'Sim — The AI Workspace',
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code. Trusted by over 100,000 builders. SOC2 compliant.',
|
||||
@@ -98,7 +98,7 @@ export default function StructuredData() {
|
||||
applicationSubCategory: 'AI Workspace',
|
||||
operatingSystem: 'Web',
|
||||
browserRequirements: 'Requires a modern browser with JavaScript enabled',
|
||||
installUrl: 'https://sim.ai/signup',
|
||||
installUrl: `${SITE_URL}/signup`,
|
||||
offers: [
|
||||
{
|
||||
'@type': 'Offer',
|
||||
@@ -175,16 +175,16 @@ export default function StructuredData() {
|
||||
},
|
||||
{
|
||||
'@type': 'SoftwareSourceCode',
|
||||
'@id': 'https://sim.ai/#source',
|
||||
'@id': `${SITE_URL}/#source`,
|
||||
codeRepository: 'https://github.com/simstudioai/sim',
|
||||
programmingLanguage: ['TypeScript', 'Python'],
|
||||
runtimePlatform: 'Node.js',
|
||||
license: 'https://opensource.org/licenses/Apache-2.0',
|
||||
isPartOf: { '@id': 'https://sim.ai/#software' },
|
||||
isPartOf: { '@id': `${SITE_URL}/#software` },
|
||||
},
|
||||
{
|
||||
'@type': 'FAQPage',
|
||||
'@id': 'https://sim.ai/#faq',
|
||||
'@id': `${SITE_URL}/#faq`,
|
||||
mainEntity: [
|
||||
{
|
||||
'@type': 'Question',
|
||||
|
||||
@@ -470,7 +470,7 @@ export default function Templates() {
|
||||
aria-labelledby={`template-tab-${activeIndex}`}
|
||||
className='relative hidden flex-1 lg:block'
|
||||
>
|
||||
<div aria-hidden='true' className='h-full'>
|
||||
<div aria-hidden='true' inert className='h-full'>
|
||||
<LandingPreviewWorkflow
|
||||
key={activeIndex}
|
||||
workflow={activeWorkflow}
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Image from 'next/image'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { IntegrationCtaButton } from '@/app/(landing)/integrations/[slug]/components/integration-cta-button'
|
||||
import { IntegrationFAQ } from '@/app/(landing)/integrations/[slug]/components/integration-faq'
|
||||
import { TemplateCardButton } from '@/app/(landing)/integrations/[slug]/components/template-card-button'
|
||||
@@ -14,12 +14,14 @@ import { TEMPLATES } from '@/app/workspace/[workspaceId]/home/components/templat
|
||||
|
||||
const allIntegrations = integrations as Integration[]
|
||||
const INTEGRATION_COUNT = allIntegrations.length
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
/** Fast O(1) lookups — avoids repeated linear scans inside render loops. */
|
||||
const bySlug = new Map(allIntegrations.map((i) => [i.slug, i]))
|
||||
const byType = new Map(allIntegrations.map((i) => [i.type, i]))
|
||||
|
||||
export const dynamicParams = false
|
||||
|
||||
/**
|
||||
* Returns up to `limit` related integration slugs.
|
||||
*
|
||||
|
||||
@@ -23,6 +23,7 @@ import {
|
||||
BoxCompanyIcon,
|
||||
BrainIcon,
|
||||
BrandfetchIcon,
|
||||
BrightDataIcon,
|
||||
BrowserUseIcon,
|
||||
CalComIcon,
|
||||
CalendlyIcon,
|
||||
@@ -215,6 +216,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
attio: AttioIcon,
|
||||
box: BoxCompanyIcon,
|
||||
brandfetch: BrandfetchIcon,
|
||||
brightdata: BrightDataIcon,
|
||||
browser_use: BrowserUseIcon,
|
||||
calcom: CalComIcon,
|
||||
calendly: CalendlyIcon,
|
||||
|
||||
@@ -214,7 +214,7 @@
|
||||
"name": "Agiloft",
|
||||
"description": "Manage records in Agiloft CLM",
|
||||
"longDescription": "Integrate with Agiloft contract lifecycle management to create, read, update, delete, and search records. Supports file attachments, SQL-based selection, saved searches, and record locking across any table in your knowledge base.",
|
||||
"bgColor": "#263A5C",
|
||||
"bgColor": "#FFFFFF",
|
||||
"iconName": "AgiloftIcon",
|
||||
"docsUrl": "https://docs.sim.ai/tools/agiloft",
|
||||
"operations": [
|
||||
@@ -1743,6 +1743,57 @@
|
||||
"integrationTypes": ["sales", "analytics"],
|
||||
"tags": ["enrichment", "marketing"]
|
||||
},
|
||||
{
|
||||
"type": "brightdata",
|
||||
"slug": "bright-data",
|
||||
"name": "Bright Data",
|
||||
"description": "Scrape websites, search engines, and extract structured data",
|
||||
"longDescription": "Integrate Bright Data into the workflow. Scrape any URL with Web Unlocker, search Google and other engines with SERP API, discover web content ranked by intent, or trigger pre-built scrapers for structured data extraction.",
|
||||
"bgColor": "#FFFFFF",
|
||||
"iconName": "BrightDataIcon",
|
||||
"docsUrl": "https://docs.sim.ai/tools/brightdata",
|
||||
"operations": [
|
||||
{
|
||||
"name": "Scrape URL",
|
||||
"description": "Fetch content from any URL using Bright Data Web Unlocker. Bypasses anti-bot protections, CAPTCHAs, and IP blocks automatically."
|
||||
},
|
||||
{
|
||||
"name": "SERP Search",
|
||||
"description": "Search Google, Bing, DuckDuckGo, or Yandex and get structured search results using Bright Data SERP API."
|
||||
},
|
||||
{
|
||||
"name": "Discover",
|
||||
"description": "AI-powered web discovery that finds and ranks results by intent. Returns up to 1,000 results with optional cleaned page content for RAG and verification."
|
||||
},
|
||||
{
|
||||
"name": "Sync Scrape",
|
||||
"description": "Scrape URLs synchronously using a Bright Data pre-built scraper and get structured results directly. Supports up to 20 URLs with a 1-minute timeout."
|
||||
},
|
||||
{
|
||||
"name": "Scrape Dataset",
|
||||
"description": "Trigger a Bright Data pre-built scraper to extract structured data from URLs. Supports 660+ scrapers for platforms like Amazon, LinkedIn, Instagram, and more."
|
||||
},
|
||||
{
|
||||
"name": "Snapshot Status",
|
||||
"description": "Check the progress of an async Bright Data scraping job. Returns status: starting, running, ready, or failed."
|
||||
},
|
||||
{
|
||||
"name": "Download Snapshot",
|
||||
"description": "Download the results of a completed Bright Data scraping job using its snapshot ID. The snapshot must have ready status."
|
||||
},
|
||||
{
|
||||
"name": "Cancel Snapshot",
|
||||
"description": "Cancel an active Bright Data scraping job using its snapshot ID. Terminates data collection in progress."
|
||||
}
|
||||
],
|
||||
"operationCount": 8,
|
||||
"triggers": [],
|
||||
"triggerCount": 0,
|
||||
"authType": "api-key",
|
||||
"category": "tools",
|
||||
"integrationTypes": ["search", "developer-tools"],
|
||||
"tags": ["web-scraping", "automation"]
|
||||
},
|
||||
{
|
||||
"type": "browser_use",
|
||||
"slug": "browser-use",
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
export default async function IntegrationsLayout({ children }: { children: React.ReactNode }) {
|
||||
const blogPosts = await getNavBlogPosts()
|
||||
const url = getBaseUrl()
|
||||
const url = SITE_URL
|
||||
const orgJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { IntegrationCard } from './components/integration-card'
|
||||
import { IntegrationGrid } from './components/integration-grid'
|
||||
import { RequestIntegrationModal } from './components/request-integration-modal'
|
||||
@@ -18,7 +18,7 @@ const INTEGRATION_COUNT = allIntegrations.length
|
||||
*/
|
||||
const TOP_NAMES = [...new Set(POPULAR_WORKFLOWS.flatMap((p) => [p.from, p.to]))].slice(0, 6)
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
/** Curated featured integrations — high-recognition services shown as cards. */
|
||||
const FEATURED_SLUGS = ['slack', 'notion', 'github', 'gmail'] as const
|
||||
|
||||
@@ -1,9 +1,10 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono'
|
||||
import { season } from '@/app/_styles/fonts/season/season'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
manifest: '/manifest.webmanifest',
|
||||
icons: {
|
||||
icon: [{ url: '/icon.svg', type: 'image/svg+xml', sizes: 'any' }],
|
||||
|
||||
@@ -1,7 +1,7 @@
|
||||
import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import { FeaturedModelCard, ProviderIcon } from '@/app/(landing)/models/components/model-primitives'
|
||||
import {
|
||||
@@ -18,7 +18,9 @@ import {
|
||||
getRelatedModels,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
export const dynamicParams = false
|
||||
|
||||
export async function generateStaticParams() {
|
||||
return ALL_CATALOG_MODELS.map((model) => ({
|
||||
@@ -221,7 +223,7 @@ export default async function ModelPage({
|
||||
|
||||
<div className='flex flex-wrap gap-2'>
|
||||
<a
|
||||
href='https://sim.ai'
|
||||
href='/'
|
||||
className='inline-flex h-[32px] items-center gap-2 rounded-[5px] border border-white bg-white px-2.5 font-season text-black text-sm transition-colors hover:border-[#E0E0E0] hover:bg-[#E0E0E0]'
|
||||
>
|
||||
Build with this model
|
||||
|
||||
@@ -2,7 +2,7 @@ import type { Metadata } from 'next'
|
||||
import Link from 'next/link'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import {
|
||||
ChevronArrow,
|
||||
@@ -20,7 +20,9 @@ import {
|
||||
TOP_MODEL_PROVIDERS,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
export const dynamicParams = false
|
||||
|
||||
export async function generateStaticParams() {
|
||||
return MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({
|
||||
|
||||
@@ -1,11 +1,11 @@
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
import Navbar from '@/app/(landing)/components/navbar/navbar'
|
||||
|
||||
export default async function ModelsLayout({ children }: { children: React.ReactNode }) {
|
||||
const blogPosts = await getNavBlogPosts()
|
||||
const url = getBaseUrl()
|
||||
const url = SITE_URL
|
||||
const orgJsonLd = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'Organization',
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { Badge } from '@/components/emcn'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { LandingFAQ } from '@/app/(landing)/components/landing-faq'
|
||||
import { ModelComparisonCharts } from '@/app/(landing)/models/components/model-comparison-charts'
|
||||
import { ModelDirectory } from '@/app/(landing)/models/components/model-directory'
|
||||
@@ -17,7 +17,7 @@ import {
|
||||
TOTAL_MODELS,
|
||||
} from '@/app/(landing)/models/utils'
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
const baseUrl = SITE_URL
|
||||
|
||||
const faqItems = [
|
||||
{
|
||||
|
||||
@@ -1,5 +1,6 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { getNavBlogPosts } from '@/lib/blog/registry'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import { martianMono } from '@/app/_styles/fonts/martian-mono/martian-mono'
|
||||
import { season } from '@/app/_styles/fonts/season/season'
|
||||
import Footer from '@/app/(landing)/components/footer/footer'
|
||||
@@ -9,7 +10,7 @@ export const metadata: Metadata = {
|
||||
title: 'Partner Program',
|
||||
description:
|
||||
"Join the Sim partner program. Build, deploy, and sell AI agent solutions powered by Sim's AI workspace. Earn your certification through Sim Academy.",
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
openGraph: {
|
||||
title: 'Partner Program | Sim',
|
||||
description: 'Join the Sim partner program.',
|
||||
|
||||
127
apps/sim/app/(landing)/seo.test.ts
Normal file
127
apps/sim/app/(landing)/seo.test.ts
Normal file
@@ -0,0 +1,127 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import fs from 'fs'
|
||||
import path from 'path'
|
||||
import { describe, expect, it } from 'vitest'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
const SIM_ROOT = path.resolve(__dirname, '..', '..')
|
||||
const APP_DIR = path.resolve(SIM_ROOT, 'app')
|
||||
const LANDING_DIR = path.resolve(APP_DIR, '(landing)')
|
||||
|
||||
/**
|
||||
* All directories containing public-facing pages or SEO-relevant code.
|
||||
* Non-marketing app routes (workspace, chat, form) are excluded —
|
||||
* they legitimately use getBaseUrl() for dynamic, env-dependent URLs.
|
||||
*/
|
||||
const SEO_SCAN_DIRS = [
|
||||
LANDING_DIR,
|
||||
path.resolve(APP_DIR, 'changelog'),
|
||||
path.resolve(APP_DIR, 'changelog.xml'),
|
||||
path.resolve(APP_DIR, 'academy'),
|
||||
path.resolve(SIM_ROOT, 'lib', 'blog'),
|
||||
path.resolve(SIM_ROOT, 'content', 'blog'),
|
||||
]
|
||||
|
||||
const SEO_SCAN_INDIVIDUAL_FILES = [
|
||||
path.resolve(APP_DIR, 'page.tsx'),
|
||||
path.resolve(SIM_ROOT, 'ee', 'whitelabeling', 'metadata.ts'),
|
||||
]
|
||||
|
||||
function collectFiles(dir: string, exts: string[]): string[] {
|
||||
const results: string[] = []
|
||||
if (!fs.existsSync(dir)) return results
|
||||
|
||||
for (const entry of fs.readdirSync(dir, { withFileTypes: true })) {
|
||||
const full = path.join(dir, entry.name)
|
||||
if (entry.isDirectory()) {
|
||||
results.push(...collectFiles(full, exts))
|
||||
} else if (exts.some((ext) => entry.name.endsWith(ext)) && !entry.name.includes('.test.')) {
|
||||
results.push(full)
|
||||
}
|
||||
}
|
||||
return results
|
||||
}
|
||||
|
||||
function getAllSeoFiles(exts: string[]): string[] {
|
||||
const files: string[] = []
|
||||
for (const dir of SEO_SCAN_DIRS) {
|
||||
files.push(...collectFiles(dir, exts))
|
||||
}
|
||||
for (const file of SEO_SCAN_INDIVIDUAL_FILES) {
|
||||
if (fs.existsSync(file)) files.push(file)
|
||||
}
|
||||
return files
|
||||
}
|
||||
|
||||
describe('SEO canonical URLs', () => {
|
||||
it('SITE_URL equals https://www.sim.ai', () => {
|
||||
expect(SITE_URL).toBe('https://www.sim.ai')
|
||||
})
|
||||
|
||||
it('public pages do not hardcode https://sim.ai (without www)', () => {
|
||||
const files = getAllSeoFiles(['.ts', '.tsx', '.mdx'])
|
||||
const violations: string[] = []
|
||||
|
||||
for (const file of files) {
|
||||
const content = fs.readFileSync(file, 'utf-8')
|
||||
const lines = content.split('\n')
|
||||
|
||||
for (let i = 0; i < lines.length; i++) {
|
||||
const line = lines[i]
|
||||
const hasBareSimAi =
|
||||
line.includes("'https://sim.ai'") ||
|
||||
line.includes("'https://sim.ai/") ||
|
||||
line.includes('"https://sim.ai"') ||
|
||||
line.includes('"https://sim.ai/') ||
|
||||
line.includes('`https://sim.ai/') ||
|
||||
line.includes('`https://sim.ai`') ||
|
||||
line.includes('canonical: https://sim.ai/')
|
||||
|
||||
if (!hasBareSimAi) continue
|
||||
|
||||
const isAllowlisted =
|
||||
line.includes('https://sim.ai/careers') || line.includes('https://sim.ai/discord')
|
||||
|
||||
if (isAllowlisted) continue
|
||||
|
||||
const rel = path.relative(SIM_ROOT, file)
|
||||
violations.push(`${rel}:${i + 1}: ${line.trim()}`)
|
||||
}
|
||||
}
|
||||
|
||||
expect(
|
||||
violations,
|
||||
`Found hardcoded https://sim.ai (without www):\n${violations.join('\n')}`
|
||||
).toHaveLength(0)
|
||||
})
|
||||
|
||||
it('public pages do not use getBaseUrl() for SEO metadata', () => {
|
||||
const files = getAllSeoFiles(['.ts', '.tsx'])
|
||||
const violations: string[] = []
|
||||
|
||||
for (const file of files) {
|
||||
const content = fs.readFileSync(file, 'utf-8')
|
||||
|
||||
if (!content.includes('getBaseUrl')) continue
|
||||
|
||||
const hasMetadataExport =
|
||||
content.includes('export const metadata') ||
|
||||
content.includes('export async function generateMetadata')
|
||||
const usesGetBaseUrlInMetadata =
|
||||
hasMetadataExport &&
|
||||
(content.includes('= getBaseUrl()') || content.includes('metadataBase: new URL(getBaseUrl'))
|
||||
|
||||
if (usesGetBaseUrlInMetadata) {
|
||||
const rel = path.relative(SIM_ROOT, file)
|
||||
violations.push(rel)
|
||||
}
|
||||
}
|
||||
|
||||
expect(
|
||||
violations,
|
||||
`Public pages should use SITE_URL for metadata, not getBaseUrl():\n${violations.join('\n')}`
|
||||
).toHaveLength(0)
|
||||
})
|
||||
})
|
||||
@@ -35,6 +35,7 @@ export function PostHogProvider({ children }: { children: React.ReactNode }) {
|
||||
capture_performance: false,
|
||||
capture_dead_clicks: false,
|
||||
enable_heatmaps: false,
|
||||
disable_session_recording: true,
|
||||
session_recording: {
|
||||
maskAllInputs: false,
|
||||
maskInputOptions: {
|
||||
|
||||
@@ -92,6 +92,13 @@ export function SessionProvider({ children }: { children: React.ReactNode }) {
|
||||
email_verified: data.user.emailVerified,
|
||||
created_at: data.user.createdAt,
|
||||
})
|
||||
if (
|
||||
typeof posthog.startSessionRecording === 'function' &&
|
||||
typeof posthog.sessionRecordingStarted === 'function' &&
|
||||
!posthog.sessionRecordingStarted()
|
||||
) {
|
||||
posthog.startSessionRecording()
|
||||
}
|
||||
} else {
|
||||
posthog.reset()
|
||||
}
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
import type React from 'react'
|
||||
import type { Metadata } from 'next'
|
||||
import { notFound } from 'next/navigation'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
// TODO: Remove notFound() call to make academy pages public once content is ready
|
||||
const ACADEMY_ENABLED = false
|
||||
@@ -12,7 +13,7 @@ export const metadata: Metadata = {
|
||||
},
|
||||
description:
|
||||
'Become a certified Sim partner — learn to build, integrate, and deploy AI workflows.',
|
||||
metadataBase: new URL('https://sim.ai'),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
openGraph: {
|
||||
title: 'Sim Academy',
|
||||
description: 'Become a certified Sim partner.',
|
||||
|
||||
@@ -1,8 +1,10 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { validatePathSegment } from '@/lib/core/security/input-validation'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { getCredential, refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -19,6 +21,7 @@ export async function GET(request: NextRequest) {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const credentialId = searchParams.get('credentialId')
|
||||
const query = searchParams.get('query') || ''
|
||||
const driveId = searchParams.get('driveId') || undefined
|
||||
const workflowId = searchParams.get('workflowId') || undefined
|
||||
|
||||
if (!credentialId) {
|
||||
@@ -72,8 +75,21 @@ export async function GET(request: NextRequest) {
|
||||
)
|
||||
searchParams_new.append('$top', '50')
|
||||
|
||||
// When driveId is provided (SharePoint), search within that specific drive.
|
||||
// Otherwise, search the user's personal OneDrive.
|
||||
if (driveId) {
|
||||
const driveIdValidation = validatePathSegment(driveId, {
|
||||
paramName: 'driveId',
|
||||
customPattern: GRAPH_ID_PATTERN,
|
||||
})
|
||||
if (!driveIdValidation.isValid) {
|
||||
return NextResponse.json({ error: driveIdValidation.error }, { status: 400 })
|
||||
}
|
||||
}
|
||||
const drivePath = driveId ? `drives/${driveId}` : 'me/drive'
|
||||
|
||||
const response = await fetch(
|
||||
`https://graph.microsoft.com/v1.0/me/drive/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`,
|
||||
`https://graph.microsoft.com/v1.0/${drivePath}/root/search(q='${encodeURIComponent(searchQuery)}')?${searchParams_new.toString()}`,
|
||||
{
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
|
||||
@@ -6,6 +6,7 @@ import { recordUsage } from '@/lib/billing/core/usage-log'
|
||||
import { checkAndBillOverageThreshold } from '@/lib/billing/threshold-billing'
|
||||
import { checkInternalApiKey } from '@/lib/copilot/request/http'
|
||||
import { isBillingEnabled } from '@/lib/core/config/feature-flags'
|
||||
import { type AtomicClaimResult, billingIdempotency } from '@/lib/core/idempotency/service'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
|
||||
const logger = createLogger('BillingUpdateCostAPI')
|
||||
@@ -19,6 +20,7 @@ const UpdateCostSchema = z.object({
|
||||
source: z
|
||||
.enum(['copilot', 'workspace-chat', 'mcp_copilot', 'mothership_block'])
|
||||
.default('copilot'),
|
||||
idempotencyKey: z.string().min(1).optional(),
|
||||
})
|
||||
|
||||
/**
|
||||
@@ -28,6 +30,8 @@ const UpdateCostSchema = z.object({
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
const startTime = Date.now()
|
||||
let claim: AtomicClaimResult | null = null
|
||||
let usageCommitted = false
|
||||
|
||||
try {
|
||||
logger.info(`[${requestId}] Update cost request started`)
|
||||
@@ -75,9 +79,30 @@ export async function POST(req: NextRequest) {
|
||||
)
|
||||
}
|
||||
|
||||
const { userId, cost, model, inputTokens, outputTokens, source } = validation.data
|
||||
const { userId, cost, model, inputTokens, outputTokens, source, idempotencyKey } =
|
||||
validation.data
|
||||
const isMcp = source === 'mcp_copilot'
|
||||
|
||||
claim = idempotencyKey
|
||||
? await billingIdempotency.atomicallyClaim('update-cost', idempotencyKey)
|
||||
: null
|
||||
|
||||
if (claim && !claim.claimed) {
|
||||
logger.warn(`[${requestId}] Duplicate billing update rejected`, {
|
||||
idempotencyKey,
|
||||
userId,
|
||||
source,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'Duplicate request: idempotency key already processed',
|
||||
requestId,
|
||||
},
|
||||
{ status: 409 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Processing cost update`, {
|
||||
userId,
|
||||
cost,
|
||||
@@ -113,6 +138,7 @@ export async function POST(req: NextRequest) {
|
||||
],
|
||||
additionalStats,
|
||||
})
|
||||
usageCommitted = true
|
||||
|
||||
logger.info(`[${requestId}] Recorded usage`, {
|
||||
userId,
|
||||
@@ -149,6 +175,22 @@ export async function POST(req: NextRequest) {
|
||||
duration,
|
||||
})
|
||||
|
||||
if (claim?.claimed && !usageCommitted) {
|
||||
await billingIdempotency
|
||||
.release(claim.normalizedKey, claim.storageMethod)
|
||||
.catch((releaseErr) => {
|
||||
logger.warn(`[${requestId}] Failed to release idempotency claim`, {
|
||||
error: releaseErr instanceof Error ? releaseErr.message : String(releaseErr),
|
||||
normalizedKey: claim?.normalizedKey,
|
||||
})
|
||||
})
|
||||
} else if (claim?.claimed && usageCommitted) {
|
||||
logger.warn(
|
||||
`[${requestId}] Error occurred after usage committed; retaining idempotency claim to prevent double-billing`,
|
||||
{ normalizedKey: claim.normalizedKey }
|
||||
)
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
|
||||
@@ -4,7 +4,9 @@ import { createLogger } from '@sim/logger'
|
||||
import { and, desc, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
|
||||
import { buildEffectiveChatTranscript } from '@/lib/copilot/chat/effective-transcript'
|
||||
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
|
||||
import { normalizeMessage } from '@/lib/copilot/chat/persisted-message'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
@@ -113,11 +115,23 @@ export async function GET(req: NextRequest) {
|
||||
}
|
||||
}
|
||||
|
||||
const normalizedMessages = Array.isArray(chat.messages)
|
||||
? chat.messages
|
||||
.filter((message): message is Record<string, unknown> => Boolean(message))
|
||||
.map(normalizeMessage)
|
||||
: []
|
||||
const effectiveMessages = buildEffectiveChatTranscript({
|
||||
messages: normalizedMessages,
|
||||
activeStreamId: chat.conversationId || null,
|
||||
...(streamSnapshot ? { streamSnapshot } : {}),
|
||||
})
|
||||
|
||||
logger.info(`Retrieved chat ${chatId}`)
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chat: {
|
||||
...transformChat(chat),
|
||||
messages: effectiveMessages,
|
||||
...(streamSnapshot ? { streamSnapshot } : {}),
|
||||
},
|
||||
})
|
||||
|
||||
@@ -169,24 +169,24 @@ export async function DELETE(req: NextRequest) {
|
||||
const body = await req.json()
|
||||
const { chatId, resourceType, resourceId } = RemoveResourceSchema.parse(body)
|
||||
|
||||
const [chat] = await db
|
||||
.select({ resources: copilotChats.resources })
|
||||
.from(copilotChats)
|
||||
const [updated] = await db
|
||||
.update(copilotChats)
|
||||
.set({
|
||||
resources: sql`COALESCE((
|
||||
SELECT jsonb_agg(elem)
|
||||
FROM jsonb_array_elements(${copilotChats.resources}) elem
|
||||
WHERE NOT (elem->>'type' = ${resourceType} AND elem->>'id' = ${resourceId})
|
||||
), '[]'::jsonb)`,
|
||||
updatedAt: new Date(),
|
||||
})
|
||||
.where(and(eq(copilotChats.id, chatId), eq(copilotChats.userId, userId)))
|
||||
.limit(1)
|
||||
.returning({ resources: copilotChats.resources })
|
||||
|
||||
if (!chat) {
|
||||
if (!updated) {
|
||||
return createNotFoundResponse('Chat not found or unauthorized')
|
||||
}
|
||||
|
||||
const existing = Array.isArray(chat.resources) ? (chat.resources as ChatResource[]) : []
|
||||
const key = `${resourceType}:${resourceId}`
|
||||
const merged = existing.filter((r) => `${r.type}:${r.id}` !== key)
|
||||
|
||||
await db
|
||||
.update(copilotChats)
|
||||
.set({ resources: sql`${JSON.stringify(merged)}::jsonb`, updatedAt: new Date() })
|
||||
.where(eq(copilotChats.id, chatId))
|
||||
const merged = Array.isArray(updated.resources) ? (updated.resources as ChatResource[]) : []
|
||||
|
||||
logger.info('Removed resource from chat', { chatId, resourceType, resourceId })
|
||||
|
||||
|
||||
160
apps/sim/app/api/copilot/chat/stop/route.test.ts
Normal file
160
apps/sim/app/api/copilot/chat/stop/route.test.ts
Normal file
@@ -0,0 +1,160 @@
|
||||
/**
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { NextRequest } from 'next/server'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
const {
|
||||
mockGetSession,
|
||||
mockSelect,
|
||||
mockFrom,
|
||||
mockWhereSelect,
|
||||
mockLimit,
|
||||
mockUpdate,
|
||||
mockSet,
|
||||
mockWhereUpdate,
|
||||
mockReturning,
|
||||
mockPublishStatusChanged,
|
||||
mockSql,
|
||||
} = vi.hoisted(() => ({
|
||||
mockGetSession: vi.fn(),
|
||||
mockSelect: vi.fn(),
|
||||
mockFrom: vi.fn(),
|
||||
mockWhereSelect: vi.fn(),
|
||||
mockLimit: vi.fn(),
|
||||
mockUpdate: vi.fn(),
|
||||
mockSet: vi.fn(),
|
||||
mockWhereUpdate: vi.fn(),
|
||||
mockReturning: vi.fn(),
|
||||
mockPublishStatusChanged: vi.fn(),
|
||||
mockSql: vi.fn((strings: TemplateStringsArray, ...values: unknown[]) => ({ strings, values })),
|
||||
}))
|
||||
|
||||
vi.mock('@/lib/auth', () => ({
|
||||
getSession: mockGetSession,
|
||||
}))
|
||||
|
||||
vi.mock('@sim/db', () => ({
|
||||
db: {
|
||||
select: mockSelect,
|
||||
update: mockUpdate,
|
||||
},
|
||||
}))
|
||||
|
||||
vi.mock('@sim/db/schema', () => ({
|
||||
copilotChats: {
|
||||
id: 'id',
|
||||
userId: 'userId',
|
||||
workspaceId: 'workspaceId',
|
||||
messages: 'messages',
|
||||
conversationId: 'conversationId',
|
||||
},
|
||||
}))
|
||||
|
||||
vi.mock('drizzle-orm', () => ({
|
||||
and: vi.fn((...conditions: unknown[]) => ({ conditions, type: 'and' })),
|
||||
eq: vi.fn((field: unknown, value: unknown) => ({ field, value, type: 'eq' })),
|
||||
sql: mockSql,
|
||||
}))
|
||||
|
||||
vi.mock('@/lib/copilot/tasks', () => ({
|
||||
taskPubSub: {
|
||||
publishStatusChanged: mockPublishStatusChanged,
|
||||
},
|
||||
}))
|
||||
|
||||
import { POST } from '@/app/api/copilot/chat/stop/route'
|
||||
|
||||
function createRequest(body: Record<string, unknown>) {
|
||||
return new NextRequest('http://localhost:3000/api/copilot/chat/stop', {
|
||||
method: 'POST',
|
||||
body: JSON.stringify(body),
|
||||
headers: { 'Content-Type': 'application/json' },
|
||||
})
|
||||
}
|
||||
|
||||
describe('copilot chat stop route', () => {
|
||||
beforeEach(() => {
|
||||
vi.clearAllMocks()
|
||||
|
||||
mockGetSession.mockResolvedValue({ user: { id: 'user-1' } })
|
||||
|
||||
mockLimit.mockResolvedValue([
|
||||
{
|
||||
workspaceId: 'ws-1',
|
||||
messages: [{ id: 'stream-1', role: 'user', content: 'hello' }],
|
||||
},
|
||||
])
|
||||
mockWhereSelect.mockReturnValue({ limit: mockLimit })
|
||||
mockFrom.mockReturnValue({ where: mockWhereSelect })
|
||||
mockSelect.mockReturnValue({ from: mockFrom })
|
||||
|
||||
mockReturning.mockResolvedValue([{ workspaceId: 'ws-1' }])
|
||||
mockWhereUpdate.mockReturnValue({ returning: mockReturning })
|
||||
mockSet.mockReturnValue({ where: mockWhereUpdate })
|
||||
mockUpdate.mockReturnValue({ set: mockSet })
|
||||
})
|
||||
|
||||
it('returns 401 when unauthenticated', async () => {
|
||||
mockGetSession.mockResolvedValueOnce(null)
|
||||
|
||||
const response = await POST(
|
||||
createRequest({
|
||||
chatId: 'chat-1',
|
||||
streamId: 'stream-1',
|
||||
content: '',
|
||||
})
|
||||
)
|
||||
|
||||
expect(response.status).toBe(401)
|
||||
expect(await response.json()).toEqual({ error: 'Unauthorized' })
|
||||
})
|
||||
|
||||
it('is a no-op when the chat is missing', async () => {
|
||||
mockLimit.mockResolvedValueOnce([])
|
||||
|
||||
const response = await POST(
|
||||
createRequest({
|
||||
chatId: 'missing-chat',
|
||||
streamId: 'stream-1',
|
||||
content: '',
|
||||
})
|
||||
)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(await response.json()).toEqual({ success: true })
|
||||
expect(mockUpdate).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('appends a stopped assistant message even with no content', async () => {
|
||||
const response = await POST(
|
||||
createRequest({
|
||||
chatId: 'chat-1',
|
||||
streamId: 'stream-1',
|
||||
content: '',
|
||||
})
|
||||
)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(await response.json()).toEqual({ success: true })
|
||||
|
||||
const setArg = mockSet.mock.calls[0]?.[0]
|
||||
expect(setArg).toBeTruthy()
|
||||
expect(setArg.conversationId).toBeNull()
|
||||
expect(setArg.messages).toBeTruthy()
|
||||
|
||||
const appendedPayload = JSON.parse(setArg.messages.values[1] as string)
|
||||
expect(appendedPayload).toHaveLength(1)
|
||||
expect(appendedPayload[0]).toMatchObject({
|
||||
role: 'assistant',
|
||||
content: '',
|
||||
contentBlocks: [{ type: 'complete', status: 'cancelled' }],
|
||||
})
|
||||
|
||||
expect(mockPublishStatusChanged).toHaveBeenCalledWith({
|
||||
workspaceId: 'ws-1',
|
||||
chatId: 'chat-1',
|
||||
type: 'completed',
|
||||
})
|
||||
})
|
||||
})
|
||||
@@ -7,6 +7,7 @@ import { z } from 'zod'
|
||||
import { getSession } from '@/lib/auth'
|
||||
import { normalizeMessage, type PersistedMessage } from '@/lib/copilot/chat/persisted-message'
|
||||
import { taskPubSub } from '@/lib/copilot/tasks'
|
||||
import { generateId } from '@/lib/core/utils/uuid'
|
||||
|
||||
const logger = createLogger('CopilotChatStopAPI')
|
||||
|
||||
@@ -70,7 +71,6 @@ export async function POST(req: NextRequest) {
|
||||
}
|
||||
|
||||
const { chatId, streamId, content, contentBlocks } = StopSchema.parse(await req.json())
|
||||
|
||||
const [row] = await db
|
||||
.select({
|
||||
workspaceId: copilotChats.workspaceId,
|
||||
@@ -106,14 +106,18 @@ export async function POST(req: NextRequest) {
|
||||
|
||||
const hasContent = content.trim().length > 0
|
||||
const hasBlocks = Array.isArray(contentBlocks) && contentBlocks.length > 0
|
||||
|
||||
if ((hasContent || hasBlocks) && canAppendAssistant) {
|
||||
const synthesizedStoppedBlocks = hasBlocks
|
||||
? contentBlocks
|
||||
: hasContent
|
||||
? [{ type: 'text', channel: 'assistant', content }, { type: 'stopped' }]
|
||||
: [{ type: 'stopped' }]
|
||||
if (canAppendAssistant) {
|
||||
const normalized = normalizeMessage({
|
||||
id: crypto.randomUUID(),
|
||||
id: generateId(),
|
||||
role: 'assistant',
|
||||
content,
|
||||
timestamp: new Date().toISOString(),
|
||||
...(hasBlocks ? { contentBlocks } : {}),
|
||||
contentBlocks: synthesizedStoppedBlocks,
|
||||
})
|
||||
const assistantMessage: PersistedMessage = normalized
|
||||
setClause.messages = sql`${copilotChats.messages} || ${JSON.stringify([assistantMessage])}::jsonb`
|
||||
|
||||
@@ -17,6 +17,7 @@ import { eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { validateOAuthAccessToken } from '@/lib/auth/oauth-token'
|
||||
import { getHighestPrioritySubscription } from '@/lib/billing/core/subscription'
|
||||
import { generateWorkspaceContext } from '@/lib/copilot/chat/workspace-context'
|
||||
import { ORCHESTRATION_TIMEOUT_MS, SIM_AGENT_API_URL } from '@/lib/copilot/constants'
|
||||
import { runHeadlessCopilotLifecycle } from '@/lib/copilot/request/lifecycle/headless'
|
||||
import { orchestrateSubagentStream } from '@/lib/copilot/request/subagent'
|
||||
@@ -136,14 +137,14 @@ When the user refers to a workflow by name or description ("the email one", "my
|
||||
### Organization
|
||||
|
||||
- \`rename_workflow\` — rename a workflow
|
||||
- \`move_workflow\` — move a workflow into a folder (or root with null)
|
||||
- \`move_folder\` — nest a folder inside another (or root with null)
|
||||
- \`move_workflow\` — move a workflow into a folder (or back to root by clearing the folder id)
|
||||
- \`move_folder\` — nest a folder inside another (or move it back to root by clearing the parent id)
|
||||
- \`create_folder(name, parentId)\` — create nested folder hierarchies
|
||||
|
||||
### Key Rules
|
||||
|
||||
- You can test workflows immediately after building — deployment is only needed for external access (API, chat, MCP).
|
||||
- All workflow-scoped copilot tools require \`workflowId\`.
|
||||
- Tools that operate on a specific workflow such as \`sim_workflow\`, \`sim_test\`, \`sim_deploy\`, and workflow-scoped \`sim_info\` requests require \`workflowId\`.
|
||||
- If the user reports errors, route through \`sim_workflow\` and ask it to reproduce, inspect logs, and fix the issue end to end.
|
||||
- Variable syntax: \`<blockname.field>\` for block outputs, \`{{ENV_VAR}}\` for env vars.
|
||||
`
|
||||
@@ -667,10 +668,10 @@ async function handleDirectToolCall(
|
||||
}
|
||||
|
||||
/**
|
||||
* Build mode uses the main chat orchestrator with the 'fast' command instead of
|
||||
* the subagent endpoint. In Go, 'workflow' is not a registered subagent — it's a mode
|
||||
* (ModeFast) on the main chat processor that bypasses subagent orchestration and
|
||||
* executes all tools directly.
|
||||
* Build mode uses the main /api/mcp orchestrator instead of /api/subagent/workflow.
|
||||
* The main agent still delegates workflow work to the workflow subagent inside Go;
|
||||
* this helper simply uses the full headless lifecycle so build requests behave like
|
||||
* the primary MCP chat flow.
|
||||
*/
|
||||
async function handleBuildToolCall(
|
||||
args: Record<string, unknown>,
|
||||
@@ -680,6 +681,8 @@ async function handleBuildToolCall(
|
||||
try {
|
||||
const requestText = (args.request as string) || JSON.stringify(args)
|
||||
const workflowId = args.workflowId as string | undefined
|
||||
let resolvedWorkflowName: string | undefined
|
||||
let resolvedWorkspaceId: string | undefined
|
||||
|
||||
const resolved = workflowId
|
||||
? await (async () => {
|
||||
@@ -688,11 +691,22 @@ async function handleBuildToolCall(
|
||||
userId,
|
||||
action: 'read',
|
||||
})
|
||||
return authorization.allowed ? { workflowId } : null
|
||||
resolvedWorkflowName = authorization.workflow?.name || undefined
|
||||
resolvedWorkspaceId = authorization.workflow?.workspaceId || undefined
|
||||
return authorization.allowed
|
||||
? { status: 'resolved' as const, workflowId, workflowName: resolvedWorkflowName }
|
||||
: {
|
||||
status: 'not_found' as const,
|
||||
message: 'workflowId is required for build. Call create_workflow first.',
|
||||
}
|
||||
})()
|
||||
: await resolveWorkflowIdForUser(userId)
|
||||
|
||||
if (!resolved?.workflowId) {
|
||||
if (resolved.status === 'resolved') {
|
||||
resolvedWorkflowName ||= resolved.workflowName
|
||||
}
|
||||
|
||||
if (!resolved || resolved.status !== 'resolved') {
|
||||
return {
|
||||
content: [
|
||||
{
|
||||
@@ -700,7 +714,9 @@ async function handleBuildToolCall(
|
||||
text: JSON.stringify(
|
||||
{
|
||||
success: false,
|
||||
error: 'workflowId is required for build. Call create_workflow first.',
|
||||
error:
|
||||
resolved?.message ??
|
||||
'workflowId is required for build. Call create_workflow first.',
|
||||
},
|
||||
null,
|
||||
2
|
||||
@@ -712,10 +728,29 @@ async function handleBuildToolCall(
|
||||
}
|
||||
|
||||
const chatId = generateId()
|
||||
const executionContext = await prepareExecutionContext(userId, resolved.workflowId, chatId, {
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
})
|
||||
resolvedWorkspaceId = executionContext.workspaceId
|
||||
let workspaceContext: string | undefined
|
||||
if (resolvedWorkspaceId) {
|
||||
try {
|
||||
workspaceContext = await generateWorkspaceContext(resolvedWorkspaceId, userId)
|
||||
} catch (error) {
|
||||
logger.warn('Failed to generate workspace context for build tool call', {
|
||||
workflowId: resolved.workflowId,
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
error: error instanceof Error ? error.message : String(error),
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const requestPayload = {
|
||||
message: requestText,
|
||||
workflowId: resolved.workflowId,
|
||||
...(resolvedWorkflowName ? { workflowName: resolvedWorkflowName } : {}),
|
||||
...(resolvedWorkspaceId ? { workspaceId: resolvedWorkspaceId } : {}),
|
||||
...(workspaceContext ? { workspaceContext } : {}),
|
||||
userId,
|
||||
model: DEFAULT_COPILOT_MODEL,
|
||||
mode: 'agent',
|
||||
@@ -727,8 +762,10 @@ async function handleBuildToolCall(
|
||||
const result = await runHeadlessCopilotLifecycle(requestPayload, {
|
||||
userId,
|
||||
workflowId: resolved.workflowId,
|
||||
workspaceId: resolvedWorkspaceId,
|
||||
chatId,
|
||||
goRoute: '/api/mcp',
|
||||
executionContext,
|
||||
autoExecuteTools: true,
|
||||
timeout: ORCHESTRATION_TIMEOUT_MS,
|
||||
interactive: false,
|
||||
|
||||
@@ -5,7 +5,9 @@ import { and, eq, sql } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { getLatestRunForStream } from '@/lib/copilot/async-runs/repository'
|
||||
import { buildEffectiveChatTranscript } from '@/lib/copilot/chat/effective-transcript'
|
||||
import { getAccessibleCopilotChat } from '@/lib/copilot/chat/lifecycle'
|
||||
import { normalizeMessage } from '@/lib/copilot/chat/persisted-message'
|
||||
import {
|
||||
authenticateCopilotRequestSessionOnly,
|
||||
createBadRequestResponse,
|
||||
@@ -93,12 +95,23 @@ export async function GET(
|
||||
}
|
||||
}
|
||||
|
||||
const normalizedMessages = Array.isArray(chat.messages)
|
||||
? chat.messages
|
||||
.filter((message): message is Record<string, unknown> => Boolean(message))
|
||||
.map(normalizeMessage)
|
||||
: []
|
||||
const effectiveMessages = buildEffectiveChatTranscript({
|
||||
messages: normalizedMessages,
|
||||
activeStreamId: chat.conversationId || null,
|
||||
...(streamSnapshot ? { streamSnapshot } : {}),
|
||||
})
|
||||
|
||||
return NextResponse.json({
|
||||
success: true,
|
||||
chat: {
|
||||
id: chat.id,
|
||||
title: chat.title,
|
||||
messages: Array.isArray(chat.messages) ? chat.messages : [],
|
||||
messages: effectiveMessages,
|
||||
conversationId: chat.conversationId || null,
|
||||
resources: Array.isArray(chat.resources) ? chat.resources : [],
|
||||
createdAt: chat.createdAt,
|
||||
|
||||
@@ -13,6 +13,7 @@ import {
|
||||
ALL_REVISION_FIELDS,
|
||||
DEFAULT_EXPORT_FORMATS,
|
||||
GOOGLE_WORKSPACE_MIME_TYPES,
|
||||
VALID_EXPORT_FORMATS,
|
||||
} from '@/tools/google_drive/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
@@ -65,10 +66,12 @@ export async function POST(request: NextRequest) {
|
||||
const {
|
||||
accessToken,
|
||||
fileId,
|
||||
mimeType: exportMimeType,
|
||||
mimeType: rawExportMimeType,
|
||||
fileName,
|
||||
includeRevisions,
|
||||
} = validatedData
|
||||
const exportMimeType =
|
||||
rawExportMimeType && rawExportMimeType !== 'auto' ? rawExportMimeType : null
|
||||
const authHeader = `Bearer ${accessToken}`
|
||||
|
||||
logger.info(`[${requestId}] Getting file metadata from Google Drive`, { fileId })
|
||||
@@ -112,6 +115,24 @@ export async function POST(request: NextRequest) {
|
||||
|
||||
if (GOOGLE_WORKSPACE_MIME_TYPES.includes(fileMimeType)) {
|
||||
const exportFormat = exportMimeType || DEFAULT_EXPORT_FORMATS[fileMimeType] || 'text/plain'
|
||||
|
||||
const validFormats = VALID_EXPORT_FORMATS[fileMimeType]
|
||||
if (validFormats && !validFormats.includes(exportFormat)) {
|
||||
logger.warn(`[${requestId}] Unsupported export format requested`, {
|
||||
fileId,
|
||||
fileMimeType,
|
||||
requestedFormat: exportFormat,
|
||||
validFormats,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: `Export format "${exportFormat}" is not supported for this file type. Supported formats: ${validFormats.join(', ')}`,
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
finalMimeType = exportFormat
|
||||
|
||||
logger.info(`[${requestId}] Exporting Google Workspace file`, {
|
||||
|
||||
@@ -3,7 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { z } from 'zod'
|
||||
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
|
||||
import { validateJiraCloudId, validateJiraIssueKey } from '@/lib/core/security/input-validation'
|
||||
import { getJiraCloudId, parseAtlassianErrorMessage } from '@/tools/jira/utils'
|
||||
import { getJiraCloudId, parseAtlassianErrorMessage, toAdf } from '@/tools/jira/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -15,14 +15,14 @@ const jiraUpdateSchema = z.object({
|
||||
issueKey: z.string().min(1, 'Issue key is required'),
|
||||
summary: z.string().optional(),
|
||||
title: z.string().optional(),
|
||||
description: z.string().optional(),
|
||||
description: z.union([z.string(), z.record(z.unknown())]).optional(),
|
||||
priority: z.string().optional(),
|
||||
assignee: z.string().optional(),
|
||||
labels: z.array(z.string()).optional(),
|
||||
components: z.array(z.string()).optional(),
|
||||
duedate: z.string().optional(),
|
||||
fixVersions: z.array(z.string()).optional(),
|
||||
environment: z.string().optional(),
|
||||
environment: z.union([z.string(), z.record(z.unknown())]).optional(),
|
||||
customFieldId: z.string().optional(),
|
||||
customFieldValue: z.string().optional(),
|
||||
notifyUsers: z.boolean().optional(),
|
||||
@@ -91,21 +91,7 @@ export async function PUT(request: NextRequest) {
|
||||
}
|
||||
|
||||
if (description !== undefined && description !== null && description !== '') {
|
||||
fields.description = {
|
||||
type: 'doc',
|
||||
version: 1,
|
||||
content: [
|
||||
{
|
||||
type: 'paragraph',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: description,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
fields.description = toAdf(description)
|
||||
}
|
||||
|
||||
if (priority !== undefined && priority !== null && priority !== '') {
|
||||
@@ -136,21 +122,7 @@ export async function PUT(request: NextRequest) {
|
||||
}
|
||||
|
||||
if (environment !== undefined && environment !== null && environment !== '') {
|
||||
fields.environment = {
|
||||
type: 'doc',
|
||||
version: 1,
|
||||
content: [
|
||||
{
|
||||
type: 'paragraph',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: environment,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
fields.environment = toAdf(environment)
|
||||
}
|
||||
|
||||
if (
|
||||
|
||||
@@ -2,7 +2,7 @@ import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { checkSessionOrInternalAuth } from '@/lib/auth/hybrid'
|
||||
import { validateAlphanumericId, validateJiraCloudId } from '@/lib/core/security/input-validation'
|
||||
import { getJiraCloudId, parseAtlassianErrorMessage } from '@/tools/jira/utils'
|
||||
import { getJiraCloudId, parseAtlassianErrorMessage, toAdf } from '@/tools/jira/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -85,21 +85,7 @@ export async function POST(request: NextRequest) {
|
||||
}
|
||||
|
||||
if (description !== undefined && description !== null && description !== '') {
|
||||
fields.description = {
|
||||
type: 'doc',
|
||||
version: 1,
|
||||
content: [
|
||||
{
|
||||
type: 'paragraph',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: description,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
fields.description = toAdf(description)
|
||||
}
|
||||
|
||||
if (parent !== undefined && parent !== null && parent !== '') {
|
||||
@@ -144,21 +130,7 @@ export async function POST(request: NextRequest) {
|
||||
}
|
||||
|
||||
if (environment !== undefined && environment !== null && environment !== '') {
|
||||
fields.environment = {
|
||||
type: 'doc',
|
||||
version: 1,
|
||||
content: [
|
||||
{
|
||||
type: 'paragraph',
|
||||
content: [
|
||||
{
|
||||
type: 'text',
|
||||
text: environment,
|
||||
},
|
||||
],
|
||||
},
|
||||
],
|
||||
}
|
||||
fields.environment = toAdf(environment)
|
||||
}
|
||||
|
||||
if (
|
||||
|
||||
135
apps/sim/app/api/tools/microsoft_excel/drives/route.ts
Normal file
135
apps/sim/app/api/tools/microsoft_excel/drives/route.ts
Normal file
@@ -0,0 +1,135 @@
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { validatePathSegment, validateSharePointSiteId } from '@/lib/core/security/input-validation'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { GRAPH_ID_PATTERN } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
const logger = createLogger('MicrosoftExcelDrivesAPI')
|
||||
|
||||
interface GraphDrive {
|
||||
id: string
|
||||
name: string
|
||||
driveType: string
|
||||
webUrl?: string
|
||||
}
|
||||
|
||||
/**
|
||||
* List document libraries (drives) for a SharePoint site.
|
||||
* Used by the microsoft.excel.drives selector to let users pick
|
||||
* which drive contains their Excel file.
|
||||
*/
|
||||
export async function POST(request: NextRequest) {
|
||||
const requestId = generateRequestId()
|
||||
|
||||
try {
|
||||
const body = await request.json()
|
||||
const { credential, workflowId, siteId, driveId } = body
|
||||
|
||||
if (!credential) {
|
||||
logger.warn(`[${requestId}] Missing credential in request`)
|
||||
return NextResponse.json({ error: 'Credential is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
if (!siteId) {
|
||||
logger.warn(`[${requestId}] Missing siteId in request`)
|
||||
return NextResponse.json({ error: 'Site ID is required' }, { status: 400 })
|
||||
}
|
||||
|
||||
const siteIdValidation = validateSharePointSiteId(siteId, 'siteId')
|
||||
if (!siteIdValidation.isValid) {
|
||||
logger.warn(`[${requestId}] Invalid siteId format`)
|
||||
return NextResponse.json({ error: siteIdValidation.error }, { status: 400 })
|
||||
}
|
||||
|
||||
const authz = await authorizeCredentialUse(request, {
|
||||
credentialId: credential,
|
||||
workflowId,
|
||||
})
|
||||
if (!authz.ok || !authz.credentialOwnerUserId) {
|
||||
return NextResponse.json({ error: authz.error || 'Unauthorized' }, { status: 403 })
|
||||
}
|
||||
|
||||
const accessToken = await refreshAccessTokenIfNeeded(
|
||||
credential,
|
||||
authz.credentialOwnerUserId,
|
||||
requestId
|
||||
)
|
||||
if (!accessToken) {
|
||||
logger.warn(`[${requestId}] Failed to obtain valid access token`)
|
||||
return NextResponse.json(
|
||||
{ error: 'Failed to obtain valid access token', authRequired: true },
|
||||
{ status: 401 }
|
||||
)
|
||||
}
|
||||
|
||||
// Single-drive lookup when driveId is provided (used by fetchById)
|
||||
if (driveId) {
|
||||
const driveIdValidation = validatePathSegment(driveId, {
|
||||
paramName: 'driveId',
|
||||
customPattern: GRAPH_ID_PATTERN,
|
||||
})
|
||||
if (!driveIdValidation.isValid) {
|
||||
return NextResponse.json({ error: driveIdValidation.error }, { status: 400 })
|
||||
}
|
||||
|
||||
const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives/${driveId}?$select=id,name,driveType,webUrl`
|
||||
const response = await fetch(url, {
|
||||
headers: { Authorization: `Bearer ${accessToken}` },
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response
|
||||
.json()
|
||||
.catch(() => ({ error: { message: 'Unknown error' } }))
|
||||
return NextResponse.json(
|
||||
{ error: errorData.error?.message || 'Failed to fetch drive' },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data: GraphDrive = await response.json()
|
||||
return NextResponse.json(
|
||||
{ drive: { id: data.id, name: data.name, driveType: data.driveType } },
|
||||
{ status: 200 }
|
||||
)
|
||||
}
|
||||
|
||||
// List all drives for the site
|
||||
const url = `https://graph.microsoft.com/v1.0/sites/${siteId}/drives?$select=id,name,driveType,webUrl`
|
||||
|
||||
const response = await fetch(url, {
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
},
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorData = await response.json().catch(() => ({ error: { message: 'Unknown error' } }))
|
||||
logger.error(`[${requestId}] Microsoft Graph API error fetching drives`, {
|
||||
status: response.status,
|
||||
error: errorData.error?.message,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ error: errorData.error?.message || 'Failed to fetch drives' },
|
||||
{ status: response.status }
|
||||
)
|
||||
}
|
||||
|
||||
const data = await response.json()
|
||||
const drives = (data.value || []).map((drive: GraphDrive) => ({
|
||||
id: drive.id,
|
||||
name: drive.name,
|
||||
driveType: drive.driveType,
|
||||
}))
|
||||
|
||||
logger.info(`[${requestId}] Successfully fetched ${drives.length} drives for site ${siteId}`)
|
||||
return NextResponse.json({ drives }, { status: 200 })
|
||||
} catch (error) {
|
||||
logger.error(`[${requestId}] Error fetching drives`, error)
|
||||
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })
|
||||
}
|
||||
}
|
||||
@@ -3,6 +3,7 @@ import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { authorizeCredentialUse } from '@/lib/auth/credential-access'
|
||||
import { generateRequestId } from '@/lib/core/utils/request'
|
||||
import { refreshAccessTokenIfNeeded } from '@/app/api/auth/oauth/utils'
|
||||
import { getItemBasePath } from '@/tools/microsoft_excel/utils'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
|
||||
@@ -30,6 +31,7 @@ export async function GET(request: NextRequest) {
|
||||
const { searchParams } = new URL(request.url)
|
||||
const credentialId = searchParams.get('credentialId')
|
||||
const spreadsheetId = searchParams.get('spreadsheetId')
|
||||
const driveId = searchParams.get('driveId') || undefined
|
||||
const workflowId = searchParams.get('workflowId') || undefined
|
||||
|
||||
if (!credentialId) {
|
||||
@@ -61,17 +63,23 @@ export async function GET(request: NextRequest) {
|
||||
`[${requestId}] Fetching worksheets from Microsoft Graph API for workbook ${spreadsheetId}`
|
||||
)
|
||||
|
||||
// Fetch worksheets from Microsoft Graph API
|
||||
const worksheetsResponse = await fetch(
|
||||
`https://graph.microsoft.com/v1.0/me/drive/items/${spreadsheetId}/workbook/worksheets`,
|
||||
{
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
}
|
||||
)
|
||||
let basePath: string
|
||||
try {
|
||||
basePath = getItemBasePath(spreadsheetId, driveId)
|
||||
} catch (error) {
|
||||
return NextResponse.json(
|
||||
{ error: error instanceof Error ? error.message : 'Invalid parameters' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const worksheetsResponse = await fetch(`${basePath}/workbook/worksheets`, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
Authorization: `Bearer ${accessToken}`,
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
})
|
||||
|
||||
if (!worksheetsResponse.ok) {
|
||||
const errorData = await worksheetsResponse
|
||||
|
||||
@@ -29,8 +29,8 @@ const RequestSchema = z.object({
|
||||
*
|
||||
* workflowId is optional - if not provided:
|
||||
* - If workflowName is provided, finds that workflow
|
||||
* - Otherwise uses the user's first workflow as context
|
||||
* - The copilot can still operate on any workflow using list_user_workflows
|
||||
* - If exactly one workflow is available, uses that workflow as context
|
||||
* - Otherwise requires workflowId or workflowName to disambiguate
|
||||
*/
|
||||
export async function POST(req: NextRequest) {
|
||||
let messageId: string | undefined
|
||||
@@ -54,11 +54,11 @@ export async function POST(req: NextRequest) {
|
||||
parsed.workflowName,
|
||||
auth.keyType === 'workspace' ? auth.workspaceId : undefined
|
||||
)
|
||||
if (!resolved) {
|
||||
if (resolved.status !== 'resolved') {
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: 'No workflows found. Create a workflow first or provide a valid workflowId.',
|
||||
error: resolved.message,
|
||||
},
|
||||
{ status: 400 }
|
||||
)
|
||||
|
||||
@@ -2,6 +2,7 @@
|
||||
* @vitest-environment node
|
||||
*/
|
||||
|
||||
import { databaseMock } from '@sim/testing'
|
||||
import { NextRequest } from 'next/server'
|
||||
import { beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
|
||||
@@ -203,4 +204,73 @@ describe('POST /api/workflows/[id]/executions/[executionId]/cancel', () => {
|
||||
|
||||
expect(response.status).toBe(403)
|
||||
})
|
||||
|
||||
it('updates execution log status in DB when durably recorded', async () => {
|
||||
const mockWhere = vi.fn().mockResolvedValue(undefined)
|
||||
const mockSet = vi.fn(() => ({ where: mockWhere }))
|
||||
databaseMock.db.update.mockReturnValueOnce({ set: mockSet })
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: true,
|
||||
reason: 'recorded',
|
||||
})
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).toHaveBeenCalled()
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
status: 'cancelled',
|
||||
endedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('updates execution log status in DB when locally aborted', async () => {
|
||||
const mockWhere = vi.fn().mockResolvedValue(undefined)
|
||||
const mockSet = vi.fn(() => ({ where: mockWhere }))
|
||||
databaseMock.db.update.mockReturnValueOnce({ set: mockSet })
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: false,
|
||||
reason: 'redis_unavailable',
|
||||
})
|
||||
mockAbortManualExecution.mockReturnValue(true)
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).toHaveBeenCalled()
|
||||
expect(mockSet).toHaveBeenCalledWith({
|
||||
status: 'cancelled',
|
||||
endedAt: expect.any(Date),
|
||||
})
|
||||
})
|
||||
|
||||
it('does not update execution log status in DB when only paused execution was cancelled', async () => {
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: false,
|
||||
reason: 'redis_unavailable',
|
||||
})
|
||||
mockCancelPausedExecution.mockResolvedValue(true)
|
||||
|
||||
await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(databaseMock.db.update).not.toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('returns success even if direct DB update fails', async () => {
|
||||
mockMarkExecutionCancelled.mockResolvedValue({
|
||||
durablyRecorded: true,
|
||||
reason: 'recorded',
|
||||
})
|
||||
databaseMock.db.update.mockReturnValueOnce({
|
||||
set: vi.fn(() => ({
|
||||
where: vi.fn(() => {
|
||||
throw new Error('DB connection failed')
|
||||
}),
|
||||
})),
|
||||
})
|
||||
|
||||
const response = await POST(makeRequest(), makeParams())
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
const data = await response.json()
|
||||
expect(data.success).toBe(true)
|
||||
})
|
||||
})
|
||||
|
||||
@@ -1,4 +1,7 @@
|
||||
import { db } from '@sim/db'
|
||||
import { workflowExecutionLogs } from '@sim/db/schema'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { and, eq } from 'drizzle-orm'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import { checkHybridAuth } from '@/lib/auth/hybrid'
|
||||
import { markExecutionCancelled } from '@/lib/execution/cancellation'
|
||||
@@ -83,6 +86,25 @@ export async function POST(
|
||||
})
|
||||
}
|
||||
|
||||
if ((cancellation.durablyRecorded || locallyAborted) && !pausedCancelled) {
|
||||
try {
|
||||
await db
|
||||
.update(workflowExecutionLogs)
|
||||
.set({ status: 'cancelled', endedAt: new Date() })
|
||||
.where(
|
||||
and(
|
||||
eq(workflowExecutionLogs.executionId, executionId),
|
||||
eq(workflowExecutionLogs.status, 'running')
|
||||
)
|
||||
)
|
||||
} catch (dbError) {
|
||||
logger.warn('Failed to update execution log status directly', {
|
||||
executionId,
|
||||
error: dbError,
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
const success = cancellation.durablyRecorded || locallyAborted || pausedCancelled
|
||||
|
||||
if (success) {
|
||||
|
||||
@@ -48,14 +48,11 @@ export async function GET(
|
||||
|
||||
const meta = await getExecutionMeta(executionId)
|
||||
if (!meta) {
|
||||
return NextResponse.json({ error: 'Execution buffer not found or expired' }, { status: 404 })
|
||||
return NextResponse.json({ error: 'Run buffer not found or expired' }, { status: 404 })
|
||||
}
|
||||
|
||||
if (meta.workflowId && meta.workflowId !== workflowId) {
|
||||
return NextResponse.json(
|
||||
{ error: 'Execution does not belong to this workflow' },
|
||||
{ status: 403 }
|
||||
)
|
||||
return NextResponse.json({ error: 'Run does not belong to this workflow' }, { status: 403 })
|
||||
}
|
||||
|
||||
const fromParam = req.nextUrl.searchParams.get('from')
|
||||
|
||||
@@ -95,7 +95,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
const { traceSpans, totalDuration } = buildTraceSpans(resultWithOutput as ExecutionResult)
|
||||
|
||||
if (result.success === false) {
|
||||
const message = result.error || 'Workflow execution failed'
|
||||
const message = result.error || 'Workflow run failed'
|
||||
await loggingSession.safeCompleteWithError({
|
||||
endedAt: new Date().toISOString(),
|
||||
totalDurationMs: totalDuration || result.metadata?.duration || 0,
|
||||
@@ -112,7 +112,7 @@ export async function POST(request: NextRequest, { params }: { params: Promise<{
|
||||
}
|
||||
|
||||
return createSuccessResponse({
|
||||
message: 'Execution logs persisted successfully',
|
||||
message: 'Run logs persisted successfully',
|
||||
})
|
||||
}
|
||||
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import { NextResponse } from 'next/server'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
|
||||
export const dynamic = 'force-static'
|
||||
export const revalidate = 3600
|
||||
@@ -48,7 +49,7 @@ export async function GET() {
|
||||
<rss version="2.0">
|
||||
<channel>
|
||||
<title>Sim Changelog</title>
|
||||
<link>https://sim.ai/changelog</link>
|
||||
<link>${SITE_URL}/changelog</link>
|
||||
<description>Latest changes, fixes and updates in Sim.</description>
|
||||
<language>en-us</language>
|
||||
${items}
|
||||
|
||||
@@ -1,9 +1,11 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import ChangelogContent from '@/app/changelog/components/changelog-content'
|
||||
|
||||
export const metadata: Metadata = {
|
||||
title: 'Changelog',
|
||||
description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.',
|
||||
alternates: { canonical: `${SITE_URL}/changelog` },
|
||||
openGraph: {
|
||||
title: 'Changelog',
|
||||
description: 'Stay up-to-date with the latest features, improvements, and bug fixes in Sim.',
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
import React, { type HTMLAttributes, memo, type ReactNode, useMemo } from 'react'
|
||||
import React, { type HTMLAttributes, memo, type ReactNode } from 'react'
|
||||
import { Streamdown } from 'streamdown'
|
||||
import 'streamdown/styles.css'
|
||||
import { CopyCodeButton, Tooltip } from '@/components/emcn'
|
||||
import { extractTextContent } from '@/lib/core/utils/react-node-text'
|
||||
|
||||
export function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) {
|
||||
function LinkWithPreview({ href, children }: { href: string; children: React.ReactNode }) {
|
||||
return (
|
||||
<Tooltip.Root delayDuration={300}>
|
||||
<Tooltip.Trigger asChild>
|
||||
@@ -24,175 +24,151 @@ export function LinkWithPreview({ href, children }: { href: string; children: Re
|
||||
)
|
||||
}
|
||||
|
||||
function createCustomComponents(LinkComponent: typeof LinkWithPreview) {
|
||||
return {
|
||||
p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
|
||||
<p className='mb-1 font-sans text-base text-gray-800 leading-relaxed last:mb-0 dark:text-gray-200'>
|
||||
{children}
|
||||
</p>
|
||||
),
|
||||
const COMPONENTS = {
|
||||
p: ({ children }: React.HTMLAttributes<HTMLParagraphElement>) => (
|
||||
<p className='mb-1 font-sans text-base text-gray-800 leading-relaxed last:mb-0 dark:text-gray-200'>
|
||||
{children}
|
||||
</p>
|
||||
),
|
||||
|
||||
h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h1 className='mt-10 mb-5 font-sans font-semibold text-2xl text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h1>
|
||||
),
|
||||
h2: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h2 className='mt-8 mb-4 font-sans font-semibold text-gray-900 text-xl dark:text-gray-100'>
|
||||
{children}
|
||||
</h2>
|
||||
),
|
||||
h3: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h3 className='mt-7 mb-3 font-sans font-semibold text-gray-900 text-lg dark:text-gray-100'>
|
||||
{children}
|
||||
</h3>
|
||||
),
|
||||
h4: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h4 className='mt-5 mb-2 font-sans font-semibold text-base text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h4>
|
||||
),
|
||||
h1: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h1 className='mt-10 mb-5 font-sans font-semibold text-2xl text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h1>
|
||||
),
|
||||
h2: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h2 className='mt-8 mb-4 font-sans font-semibold text-gray-900 text-xl dark:text-gray-100'>
|
||||
{children}
|
||||
</h2>
|
||||
),
|
||||
h3: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h3 className='mt-7 mb-3 font-sans font-semibold text-gray-900 text-lg dark:text-gray-100'>
|
||||
{children}
|
||||
</h3>
|
||||
),
|
||||
h4: ({ children }: React.HTMLAttributes<HTMLHeadingElement>) => (
|
||||
<h4 className='mt-5 mb-2 font-sans font-semibold text-base text-gray-900 dark:text-gray-100'>
|
||||
{children}
|
||||
</h4>
|
||||
),
|
||||
|
||||
ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
|
||||
<ul
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'disc' }}
|
||||
>
|
||||
{children}
|
||||
</ul>
|
||||
),
|
||||
ol: ({ children }: React.HTMLAttributes<HTMLOListElement>) => (
|
||||
<ol
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'decimal' }}
|
||||
>
|
||||
{children}
|
||||
</ol>
|
||||
),
|
||||
li: ({ children }: React.LiHTMLAttributes<HTMLLIElement>) => (
|
||||
<li className='font-sans text-gray-800 dark:text-gray-200' style={{ display: 'list-item' }}>
|
||||
{children}
|
||||
</li>
|
||||
),
|
||||
ul: ({ children }: React.HTMLAttributes<HTMLUListElement>) => (
|
||||
<ul
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'disc' }}
|
||||
>
|
||||
{children}
|
||||
</ul>
|
||||
),
|
||||
ol: ({ children }: React.HTMLAttributes<HTMLOListElement>) => (
|
||||
<ol
|
||||
className='mt-1 mb-1 space-y-1 pl-6 font-sans text-gray-800 dark:text-gray-200'
|
||||
style={{ listStyleType: 'decimal' }}
|
||||
>
|
||||
{children}
|
||||
</ol>
|
||||
),
|
||||
li: ({ children }: React.LiHTMLAttributes<HTMLLIElement>) => (
|
||||
<li className='font-sans text-gray-800 dark:text-gray-200' style={{ display: 'list-item' }}>
|
||||
{children}
|
||||
</li>
|
||||
),
|
||||
|
||||
pre: ({ children }: HTMLAttributes<HTMLPreElement>) => {
|
||||
let codeProps: HTMLAttributes<HTMLElement> = {}
|
||||
let codeContent: ReactNode = children
|
||||
pre: ({ children }: HTMLAttributes<HTMLPreElement>) => {
|
||||
let codeProps: HTMLAttributes<HTMLElement> = {}
|
||||
let codeContent: ReactNode = children
|
||||
|
||||
if (
|
||||
React.isValidElement<{ className?: string; children?: ReactNode }>(children) &&
|
||||
children.type === 'code'
|
||||
) {
|
||||
const childElement = children as React.ReactElement<{
|
||||
className?: string
|
||||
children?: ReactNode
|
||||
}>
|
||||
codeProps = { className: childElement.props.className }
|
||||
codeContent = childElement.props.children
|
||||
}
|
||||
if (
|
||||
React.isValidElement<{ className?: string; children?: ReactNode }>(children) &&
|
||||
children.type === 'code'
|
||||
) {
|
||||
const childElement = children as React.ReactElement<{
|
||||
className?: string
|
||||
children?: ReactNode
|
||||
}>
|
||||
codeProps = { className: childElement.props.className }
|
||||
codeContent = childElement.props.children
|
||||
}
|
||||
|
||||
return (
|
||||
<div className='my-6 rounded-md bg-gray-900 text-sm dark:bg-black'>
|
||||
<div className='flex items-center justify-between border-gray-700 border-b px-4 py-1.5 dark:border-gray-800'>
|
||||
<span className='font-sans text-gray-400 text-xs'>
|
||||
{codeProps.className?.replace('language-', '') || 'code'}
|
||||
</span>
|
||||
<CopyCodeButton
|
||||
code={extractTextContent(codeContent)}
|
||||
className='text-gray-400 hover-hover:bg-gray-700 hover-hover:text-gray-200'
|
||||
/>
|
||||
</div>
|
||||
<pre className='overflow-x-auto p-4 font-mono text-gray-200 dark:text-gray-100'>
|
||||
{codeContent}
|
||||
</pre>
|
||||
return (
|
||||
<div className='my-6 rounded-md bg-gray-900 text-sm dark:bg-black'>
|
||||
<div className='flex items-center justify-between border-gray-700 border-b px-4 py-1.5 dark:border-gray-800'>
|
||||
<span className='font-sans text-gray-400 text-xs'>
|
||||
{codeProps.className?.replace('language-', '') || 'code'}
|
||||
</span>
|
||||
<CopyCodeButton
|
||||
code={extractTextContent(codeContent)}
|
||||
className='text-gray-400 hover-hover:bg-gray-700 hover-hover:text-gray-200'
|
||||
/>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
|
||||
inlineCode: ({ children }: { children?: React.ReactNode }) => (
|
||||
<code className='rounded bg-gray-200 px-1 py-0.5 font-mono text-gray-800 text-inherit dark:bg-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</code>
|
||||
),
|
||||
|
||||
blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
|
||||
<blockquote className='my-4 border-gray-300 border-l-4 py-1 pl-4 font-sans text-gray-700 italic dark:border-gray-600 dark:text-gray-300'>
|
||||
{children}
|
||||
</blockquote>
|
||||
),
|
||||
|
||||
hr: () => <hr className='my-8 border-gray-500/[.07] border-t dark:border-gray-400/[.07]' />,
|
||||
|
||||
a: ({ href, children, ...props }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
|
||||
<LinkComponent href={href || '#'} {...props}>
|
||||
{children}
|
||||
</LinkComponent>
|
||||
),
|
||||
|
||||
table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
|
||||
<div className='my-4 w-full overflow-x-auto'>
|
||||
<table className='min-w-full table-auto border border-gray-300 font-sans text-sm dark:border-gray-700'>
|
||||
{children}
|
||||
</table>
|
||||
<pre className='overflow-x-auto p-4 font-mono text-gray-200 dark:text-gray-100'>
|
||||
{codeContent}
|
||||
</pre>
|
||||
</div>
|
||||
),
|
||||
thead: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<thead className='bg-gray-100 text-left dark:bg-gray-800'>{children}</thead>
|
||||
),
|
||||
tbody: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<tbody className='divide-y divide-gray-200 bg-white dark:divide-gray-700 dark:bg-gray-900'>
|
||||
{children}
|
||||
</tbody>
|
||||
),
|
||||
tr: ({ children }: React.HTMLAttributes<HTMLTableRowElement>) => (
|
||||
<tr className='border-gray-200 border-b transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-gray-800/60'>
|
||||
{children}
|
||||
</tr>
|
||||
),
|
||||
th: ({ children }: React.ThHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<th className='border-gray-300 border-r px-4 py-2 font-medium text-gray-700 last:border-r-0 dark:border-gray-700 dark:text-gray-300'>
|
||||
{children}
|
||||
</th>
|
||||
),
|
||||
td: ({ children }: React.TdHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<td className='break-words border-gray-300 border-r px-4 py-2 text-gray-800 last:border-r-0 dark:border-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</td>
|
||||
),
|
||||
)
|
||||
},
|
||||
|
||||
img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
|
||||
<img
|
||||
src={src}
|
||||
alt={alt || 'Image'}
|
||||
className='my-3 h-auto max-w-full rounded-md'
|
||||
{...props}
|
||||
/>
|
||||
),
|
||||
}
|
||||
inlineCode: ({ children }: { children?: React.ReactNode }) => (
|
||||
<code className='rounded bg-gray-200 px-1 py-0.5 font-mono text-gray-800 text-inherit dark:bg-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</code>
|
||||
),
|
||||
|
||||
blockquote: ({ children }: React.HTMLAttributes<HTMLQuoteElement>) => (
|
||||
<blockquote className='my-4 border-gray-300 border-l-4 py-1 pl-4 font-sans text-gray-700 italic dark:border-gray-600 dark:text-gray-300'>
|
||||
{children}
|
||||
</blockquote>
|
||||
),
|
||||
|
||||
hr: () => <hr className='my-8 border-gray-500/[.07] border-t dark:border-gray-400/[.07]' />,
|
||||
|
||||
a: ({ href, children, ...props }: React.AnchorHTMLAttributes<HTMLAnchorElement>) => (
|
||||
<LinkWithPreview href={href || '#'} {...props}>
|
||||
{children}
|
||||
</LinkWithPreview>
|
||||
),
|
||||
|
||||
table: ({ children }: React.TableHTMLAttributes<HTMLTableElement>) => (
|
||||
<div className='my-4 w-full overflow-x-auto'>
|
||||
<table className='min-w-full table-auto border border-gray-300 font-sans text-sm dark:border-gray-700'>
|
||||
{children}
|
||||
</table>
|
||||
</div>
|
||||
),
|
||||
thead: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<thead className='bg-gray-100 text-left dark:bg-gray-800'>{children}</thead>
|
||||
),
|
||||
tbody: ({ children }: React.HTMLAttributes<HTMLTableSectionElement>) => (
|
||||
<tbody className='divide-y divide-gray-200 bg-white dark:divide-gray-700 dark:bg-gray-900'>
|
||||
{children}
|
||||
</tbody>
|
||||
),
|
||||
tr: ({ children }: React.HTMLAttributes<HTMLTableRowElement>) => (
|
||||
<tr className='border-gray-200 border-b transition-colors hover:bg-gray-50 dark:border-gray-700 dark:hover:bg-gray-800/60'>
|
||||
{children}
|
||||
</tr>
|
||||
),
|
||||
th: ({ children }: React.ThHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<th className='border-gray-300 border-r px-4 py-2 font-medium text-gray-700 last:border-r-0 dark:border-gray-700 dark:text-gray-300'>
|
||||
{children}
|
||||
</th>
|
||||
),
|
||||
td: ({ children }: React.TdHTMLAttributes<HTMLTableCellElement>) => (
|
||||
<td className='break-words border-gray-300 border-r px-4 py-2 text-gray-800 last:border-r-0 dark:border-gray-700 dark:text-gray-200'>
|
||||
{children}
|
||||
</td>
|
||||
),
|
||||
|
||||
img: ({ src, alt, ...props }: React.ImgHTMLAttributes<HTMLImageElement>) => (
|
||||
<img src={src} alt={alt || 'Image'} className='my-3 h-auto max-w-full rounded-md' {...props} />
|
||||
),
|
||||
}
|
||||
|
||||
const DEFAULT_COMPONENTS = createCustomComponents(LinkWithPreview)
|
||||
|
||||
const MarkdownRenderer = memo(function MarkdownRenderer({
|
||||
content,
|
||||
customLinkComponent,
|
||||
}: {
|
||||
content: string
|
||||
customLinkComponent?: typeof LinkWithPreview
|
||||
}) {
|
||||
const components = useMemo(() => {
|
||||
if (!customLinkComponent) {
|
||||
return DEFAULT_COMPONENTS
|
||||
}
|
||||
return createCustomComponents(customLinkComponent)
|
||||
}, [customLinkComponent])
|
||||
|
||||
const processedContent = content.trim()
|
||||
|
||||
const MarkdownRenderer = memo(function MarkdownRenderer({ content }: { content: string }) {
|
||||
return (
|
||||
<div className='space-y-4 break-words font-sans text-[var(--landing-text)] text-base leading-relaxed'>
|
||||
<Streamdown mode='static' components={components}>
|
||||
{processedContent}
|
||||
<Streamdown mode='static' components={COMPONENTS}>
|
||||
{content.trim()}
|
||||
</Streamdown>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -8,7 +8,6 @@ import {
|
||||
ChatFileDownloadAll,
|
||||
} from '@/app/chat/components/message/components/file-download'
|
||||
import MarkdownRenderer from '@/app/chat/components/message/components/markdown-renderer'
|
||||
import { useThrottledValue } from '@/hooks/use-throttled-value'
|
||||
|
||||
export interface ChatAttachment {
|
||||
id: string
|
||||
@@ -39,11 +38,6 @@ export interface ChatMessage {
|
||||
files?: ChatFile[]
|
||||
}
|
||||
|
||||
function EnhancedMarkdownRenderer({ content }: { content: string }) {
|
||||
const throttled = useThrottledValue(content)
|
||||
return <MarkdownRenderer content={throttled} />
|
||||
}
|
||||
|
||||
export const ClientChatMessage = memo(
|
||||
function ClientChatMessage({ message }: { message: ChatMessage }) {
|
||||
const [isCopied, setIsCopied] = useState(false)
|
||||
@@ -188,7 +182,7 @@ export const ClientChatMessage = memo(
|
||||
{JSON.stringify(cleanTextContent, null, 2)}
|
||||
</pre>
|
||||
) : (
|
||||
<EnhancedMarkdownRenderer content={cleanTextContent as string} />
|
||||
<MarkdownRenderer content={cleanTextContent as string} />
|
||||
)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -6,7 +6,7 @@ export const CHAT_ERROR_MESSAGES = {
|
||||
AUTH_REQUIRED_EMAIL: 'Please provide your email to access this chat.',
|
||||
CHAT_UNAVAILABLE: 'This chat is currently unavailable. Please try again later.',
|
||||
NO_CHAT_TRIGGER:
|
||||
'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat execution.',
|
||||
'No Chat trigger configured for this workflow. Add a Chat Trigger block to enable chat.',
|
||||
USAGE_LIMIT_EXCEEDED: 'Usage limit exceeded. Please upgrade your plan to continue using chat.',
|
||||
} as const
|
||||
|
||||
|
||||
@@ -5,7 +5,6 @@ import { BrandedLayout } from '@/components/branded-layout'
|
||||
import { PostHogProvider } from '@/app/_shell/providers/posthog-provider'
|
||||
import { generateBrandedMetadata, generateThemeCSS } from '@/ee/whitelabeling'
|
||||
import '@/app/_styles/globals.css'
|
||||
import { OneDollarStats } from '@/components/analytics/onedollarstats'
|
||||
import { isHosted, isReactGrabEnabled, isReactScanEnabled } from '@/lib/core/config/feature-flags'
|
||||
import { HydrationErrorHandler } from '@/app/_shell/hydration-error-handler'
|
||||
import { QueryProvider } from '@/app/_shell/providers/query-provider'
|
||||
@@ -207,10 +206,6 @@ export default function RootLayout({ children }: { children: React.ReactNode })
|
||||
<meta name='format-detection' content='telephone=no' />
|
||||
<meta httpEquiv='x-ua-compatible' content='ie=edge' />
|
||||
|
||||
{/* OneDollarStats Analytics */}
|
||||
<link rel='dns-prefetch' href='https://assets.onedollarstats.com' />
|
||||
<script defer src='https://assets.onedollarstats.com/stonks.js' />
|
||||
|
||||
{/* Google Tag Manager — hosted only */}
|
||||
{isHosted && (
|
||||
<Script
|
||||
@@ -260,7 +255,6 @@ j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src=
|
||||
</noscript>
|
||||
)}
|
||||
<HydrationErrorHandler />
|
||||
<OneDollarStats />
|
||||
<PostHogProvider>
|
||||
<ThemeProvider>
|
||||
<QueryProvider>
|
||||
|
||||
@@ -25,10 +25,10 @@ Sim lets teams create agents visually with the workflow builder, conversationall
|
||||
|
||||
## Key Concepts
|
||||
|
||||
- **Workspace**: The AI workspace — container for agents, workflows, data sources, and executions
|
||||
- **Workspace**: The AI workspace — container for agents, workflows, data sources, and runs
|
||||
- **Workflow**: Visual builder — directed graph of blocks defining agent logic
|
||||
- **Block**: Individual step such as an LLM call, tool call, HTTP request, or code execution
|
||||
- **Trigger**: Event or schedule that initiates workflow execution
|
||||
- **Trigger**: Event or schedule that initiates a workflow run
|
||||
- **Execution**: A single run of a workflow with logs and outputs
|
||||
- **Knowledge Base**: Document store used for retrieval-augmented generation
|
||||
|
||||
@@ -41,7 +41,7 @@ Sim lets teams create agents visually with the workflow builder, conversationall
|
||||
- Knowledge bases and retrieval-augmented generation
|
||||
- Table creation and management
|
||||
- Document creation and processing
|
||||
- Scheduled and webhook-triggered executions
|
||||
- Scheduled and webhook-triggered runs
|
||||
|
||||
## Use Cases
|
||||
|
||||
|
||||
@@ -1,13 +1,11 @@
|
||||
import type { Metadata } from 'next'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import { SITE_URL } from '@/lib/core/utils/urls'
|
||||
import Landing from '@/app/(landing)/landing'
|
||||
|
||||
export const revalidate = 3600
|
||||
|
||||
const baseUrl = getBaseUrl()
|
||||
|
||||
export const metadata: Metadata = {
|
||||
metadataBase: new URL(baseUrl),
|
||||
metadataBase: new URL(SITE_URL),
|
||||
title: {
|
||||
absolute: 'Sim — The AI Workspace | Build, Deploy & Manage AI Agents',
|
||||
},
|
||||
@@ -28,7 +26,7 @@ export const metadata: Metadata = {
|
||||
description:
|
||||
'Sim is the open-source AI workspace where teams build, deploy, and manage AI agents. Connect 1,000+ integrations and every major LLM to create agents that automate real work — visually, conversationally, or with code.',
|
||||
type: 'website',
|
||||
url: baseUrl,
|
||||
url: SITE_URL,
|
||||
siteName: 'Sim',
|
||||
locale: 'en_US',
|
||||
images: [
|
||||
@@ -54,10 +52,10 @@ export const metadata: Metadata = {
|
||||
},
|
||||
},
|
||||
alternates: {
|
||||
canonical: baseUrl,
|
||||
canonical: SITE_URL,
|
||||
languages: {
|
||||
'en-US': baseUrl,
|
||||
'x-default': baseUrl,
|
||||
'en-US': SITE_URL,
|
||||
'x-default': SITE_URL,
|
||||
},
|
||||
},
|
||||
robots: {
|
||||
|
||||
@@ -4,133 +4,27 @@ import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
export default function robots(): MetadataRoute.Robots {
|
||||
const baseUrl = getBaseUrl()
|
||||
|
||||
const disallowedPaths = [
|
||||
'/api/',
|
||||
'/workspace/',
|
||||
'/chat/',
|
||||
'/playground/',
|
||||
'/resume/',
|
||||
'/invite/',
|
||||
'/unsubscribe/',
|
||||
'/w/',
|
||||
'/_next/',
|
||||
'/private/',
|
||||
]
|
||||
|
||||
return {
|
||||
rules: [
|
||||
{
|
||||
userAgent: '*',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Googlebot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Bingbot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'YandexBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Baiduspider',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'GPTBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'ChatGPT-User',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'OAI-SearchBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'ClaudeBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Claude-SearchBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Google-Extended',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'PerplexityBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Meta-ExternalAgent',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'FacebookBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Applebot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Applebot-Extended',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Amazonbot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Bytespider',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'CCBot',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'cohere-ai',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'Grok-web-crawl',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
},
|
||||
{
|
||||
userAgent: 'DeepSeek-AI',
|
||||
allow: '/',
|
||||
disallow: disallowedPaths,
|
||||
disallow: [
|
||||
'/api/',
|
||||
'/workspace/',
|
||||
'/chat/',
|
||||
'/playground/',
|
||||
'/resume/',
|
||||
'/invite/',
|
||||
'/unsubscribe/',
|
||||
'/w/',
|
||||
'/form/',
|
||||
'/credential-account/',
|
||||
'/_next/',
|
||||
'/private/',
|
||||
],
|
||||
},
|
||||
],
|
||||
sitemap: `${baseUrl}/sitemap.xml`,
|
||||
host: baseUrl,
|
||||
sitemap: [`${baseUrl}/sitemap.xml`, `${baseUrl}/blog/sitemap-images.xml`],
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,5 @@
|
||||
import type { MetadataRoute } from 'next'
|
||||
import { COURSES } from '@/lib/academy/content'
|
||||
import { getAllPostMeta } from '@/lib/blog/registry'
|
||||
import { getBaseUrl } from '@/lib/core/utils/urls'
|
||||
import integrations from '@/app/(landing)/integrations/data/integrations.json'
|
||||
@@ -6,69 +7,44 @@ import { ALL_CATALOG_MODELS, MODEL_PROVIDERS_WITH_CATALOGS } from '@/app/(landin
|
||||
|
||||
export default async function sitemap(): Promise<MetadataRoute.Sitemap> {
|
||||
const baseUrl = getBaseUrl()
|
||||
const posts = await getAllPostMeta()
|
||||
|
||||
const now = new Date()
|
||||
const integrationPages: MetadataRoute.Sitemap = integrations.map((integration) => ({
|
||||
url: `${baseUrl}/integrations/${integration.slug}`,
|
||||
lastModified: now,
|
||||
changeFrequency: 'monthly',
|
||||
priority: 0.6,
|
||||
}))
|
||||
const modelHubPages: MetadataRoute.Sitemap = [
|
||||
{
|
||||
url: `${baseUrl}/integrations`,
|
||||
lastModified: now,
|
||||
changeFrequency: 'weekly',
|
||||
priority: 0.8,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/models`,
|
||||
lastModified: now,
|
||||
changeFrequency: 'weekly',
|
||||
priority: 0.8,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/partners`,
|
||||
lastModified: now,
|
||||
changeFrequency: 'monthly',
|
||||
priority: 0.5,
|
||||
},
|
||||
]
|
||||
const providerPages: MetadataRoute.Sitemap = MODEL_PROVIDERS_WITH_CATALOGS.map((provider) => ({
|
||||
url: `${baseUrl}${provider.href}`,
|
||||
lastModified: new Date(
|
||||
Math.max(...provider.models.map((model) => new Date(model.pricing.updatedAt).getTime()))
|
||||
),
|
||||
changeFrequency: 'weekly',
|
||||
priority: 0.7,
|
||||
}))
|
||||
const modelPages: MetadataRoute.Sitemap = ALL_CATALOG_MODELS.map((model) => ({
|
||||
url: `${baseUrl}${model.href}`,
|
||||
lastModified: new Date(model.pricing.updatedAt),
|
||||
changeFrequency: 'monthly',
|
||||
priority: 0.6,
|
||||
}))
|
||||
const latestPostDate =
|
||||
posts.length > 0
|
||||
? new Date(Math.max(...posts.map((p) => new Date(p.updated ?? p.date).getTime())))
|
||||
: undefined
|
||||
|
||||
const modelTimes = MODEL_PROVIDERS_WITH_CATALOGS.flatMap((provider) =>
|
||||
provider.models.map((model) => new Date(model.pricing.updatedAt).getTime())
|
||||
)
|
||||
const latestModelDate = modelTimes.length > 0 ? new Date(Math.max(...modelTimes)) : undefined
|
||||
|
||||
const staticPages: MetadataRoute.Sitemap = [
|
||||
{
|
||||
url: baseUrl,
|
||||
lastModified: now,
|
||||
changeFrequency: 'daily',
|
||||
priority: 1.0,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/blog`,
|
||||
lastModified: now,
|
||||
changeFrequency: 'daily',
|
||||
priority: 0.8,
|
||||
lastModified: latestPostDate,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/blog/tags`,
|
||||
lastModified: now,
|
||||
lastModified: latestPostDate,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/changelog`,
|
||||
lastModified: now,
|
||||
lastModified: latestPostDate,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/integrations`,
|
||||
lastModified: latestModelDate,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/models`,
|
||||
lastModified: latestModelDate,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/partners`,
|
||||
},
|
||||
{
|
||||
url: `${baseUrl}/terms`,
|
||||
@@ -80,20 +56,61 @@ export default async function sitemap(): Promise<MetadataRoute.Sitemap> {
|
||||
},
|
||||
]
|
||||
|
||||
const posts = await getAllPostMeta()
|
||||
const blogPages: MetadataRoute.Sitemap = posts.map((p) => ({
|
||||
url: p.canonical,
|
||||
lastModified: new Date(p.updated ?? p.date),
|
||||
changeFrequency: 'weekly',
|
||||
priority: 0.7,
|
||||
}))
|
||||
|
||||
const authorsMap = new Map<string, Date>()
|
||||
for (const p of posts) {
|
||||
for (const author of p.authors ?? [p.author]) {
|
||||
const postDate = new Date(p.updated ?? p.date)
|
||||
const existing = authorsMap.get(author.id)
|
||||
if (!existing || postDate > existing) {
|
||||
authorsMap.set(author.id, postDate)
|
||||
}
|
||||
}
|
||||
}
|
||||
const authorPages: MetadataRoute.Sitemap = [...authorsMap.entries()].map(([id, date]) => ({
|
||||
url: `${baseUrl}/blog/authors/${id}`,
|
||||
lastModified: date,
|
||||
}))
|
||||
|
||||
const integrationPages: MetadataRoute.Sitemap = integrations.map((integration) => ({
|
||||
url: `${baseUrl}/integrations/${integration.slug}`,
|
||||
}))
|
||||
|
||||
const providerPages: MetadataRoute.Sitemap = MODEL_PROVIDERS_WITH_CATALOGS.flatMap((provider) => {
|
||||
if (provider.models.length === 0) return []
|
||||
return [
|
||||
{
|
||||
url: `${baseUrl}${provider.href}`,
|
||||
lastModified: new Date(
|
||||
Math.max(...provider.models.map((model) => new Date(model.pricing.updatedAt).getTime()))
|
||||
),
|
||||
},
|
||||
]
|
||||
})
|
||||
|
||||
const modelEntries: MetadataRoute.Sitemap = ALL_CATALOG_MODELS.map((model) => ({
|
||||
url: `${baseUrl}${model.href}`,
|
||||
lastModified: new Date(model.pricing.updatedAt),
|
||||
}))
|
||||
|
||||
const academyPages: MetadataRoute.Sitemap = [
|
||||
{ url: `${baseUrl}/academy` },
|
||||
...COURSES.map((course) => ({
|
||||
url: `${baseUrl}/academy/${course.slug}`,
|
||||
})),
|
||||
]
|
||||
|
||||
return [
|
||||
...staticPages,
|
||||
...modelHubPages,
|
||||
...blogPages,
|
||||
...authorPages,
|
||||
...integrationPages,
|
||||
...providerPages,
|
||||
...modelPages,
|
||||
...blogPages,
|
||||
...modelEntries,
|
||||
...academyPages,
|
||||
]
|
||||
}
|
||||
|
||||
@@ -21,7 +21,13 @@ interface AgentGroupProps {
|
||||
}
|
||||
|
||||
function isToolDone(status: ToolCallData['status']): boolean {
|
||||
return status === 'success' || status === 'error' || status === 'cancelled'
|
||||
return (
|
||||
status === 'success' ||
|
||||
status === 'error' ||
|
||||
status === 'cancelled' ||
|
||||
status === 'skipped' ||
|
||||
status === 'rejected'
|
||||
)
|
||||
}
|
||||
|
||||
export function AgentGroup({
|
||||
|
||||
@@ -18,7 +18,6 @@ import {
|
||||
SpecialTags,
|
||||
} from '@/app/workspace/[workspaceId]/home/components/message-content/components/special-tags'
|
||||
import type { MothershipResource } from '@/app/workspace/[workspaceId]/home/types'
|
||||
import { useStreamingText } from '@/hooks/use-streaming-text'
|
||||
|
||||
const LANG_ALIASES: Record<string, string> = {
|
||||
js: 'javascript',
|
||||
@@ -236,7 +235,6 @@ interface ChatContentProps {
|
||||
isStreaming?: boolean
|
||||
onOptionSelect?: (id: string) => void
|
||||
onWorkspaceResourceSelect?: (resource: MothershipResource) => void
|
||||
smoothStreaming?: boolean
|
||||
}
|
||||
|
||||
export function ChatContent({
|
||||
@@ -244,20 +242,7 @@ export function ChatContent({
|
||||
isStreaming = false,
|
||||
onOptionSelect,
|
||||
onWorkspaceResourceSelect,
|
||||
smoothStreaming = true,
|
||||
}: ChatContentProps) {
|
||||
const hydratedStreamingRef = useRef(isStreaming && content.trim().length > 0)
|
||||
const previousIsStreamingRef = useRef(isStreaming)
|
||||
|
||||
useEffect(() => {
|
||||
if (!previousIsStreamingRef.current && isStreaming && content.trim().length > 0) {
|
||||
hydratedStreamingRef.current = true
|
||||
} else if (!isStreaming) {
|
||||
hydratedStreamingRef.current = false
|
||||
}
|
||||
previousIsStreamingRef.current = isStreaming
|
||||
}, [content, isStreaming])
|
||||
|
||||
const onWorkspaceResourceSelectRef = useRef(onWorkspaceResourceSelect)
|
||||
onWorkspaceResourceSelectRef.current = onWorkspaceResourceSelect
|
||||
|
||||
@@ -270,9 +255,7 @@ export function ChatContent({
|
||||
return () => window.removeEventListener('wsres-click', handler)
|
||||
}, [])
|
||||
|
||||
const rendered = useStreamingText(content, isStreaming && smoothStreaming)
|
||||
|
||||
const parsed = useMemo(() => parseSpecialTags(rendered, isStreaming), [rendered, isStreaming])
|
||||
const parsed = useMemo(() => parseSpecialTags(content, isStreaming), [content, isStreaming])
|
||||
const hasSpecialContent = parsed.hasPendingTag || parsed.segments.some((s) => s.type !== 'text')
|
||||
|
||||
if (hasSpecialContent) {
|
||||
@@ -322,7 +305,10 @@ export function ChatContent({
|
||||
key={`inline-${i}`}
|
||||
className={cn(PROSE_CLASSES, '[&>:first-child]:mt-0 [&>:last-child]:mb-0')}
|
||||
>
|
||||
<Streamdown mode='static' components={MARKDOWN_COMPONENTS}>
|
||||
<Streamdown
|
||||
mode={isStreaming ? undefined : 'static'}
|
||||
components={MARKDOWN_COMPONENTS}
|
||||
>
|
||||
{group.markdown}
|
||||
</Streamdown>
|
||||
</div>
|
||||
@@ -343,13 +329,8 @@ export function ChatContent({
|
||||
|
||||
return (
|
||||
<div className={cn(PROSE_CLASSES, '[&>:first-child]:mt-0 [&>:last-child]:mb-0')}>
|
||||
<Streamdown
|
||||
mode={isStreaming ? undefined : 'static'}
|
||||
isAnimating={isStreaming}
|
||||
animated={isStreaming && !hydratedStreamingRef.current}
|
||||
components={MARKDOWN_COMPONENTS}
|
||||
>
|
||||
{rendered}
|
||||
<Streamdown mode={isStreaming ? undefined : 'static'} components={MARKDOWN_COMPONENTS}>
|
||||
{content}
|
||||
</Streamdown>
|
||||
</div>
|
||||
)
|
||||
|
||||
@@ -415,7 +415,7 @@ function OptionsDisplay({ data, onSelect }: OptionsDisplayProps) {
|
||||
if (entries.length === 0) return null
|
||||
|
||||
return (
|
||||
<div className='animate-stream-fade-in'>
|
||||
<div>
|
||||
{disabled ? (
|
||||
<button
|
||||
type='button'
|
||||
@@ -608,7 +608,7 @@ function CredentialDisplay({ data }: { data: CredentialTagData }) {
|
||||
href={data.value}
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
className='flex animate-stream-fade-in items-center gap-2 rounded-lg border border-[var(--divider)] px-3 py-2.5 transition-colors hover-hover:bg-[var(--surface-5)]'
|
||||
className='flex items-center gap-2 rounded-lg border border-[var(--divider)] px-3 py-2.5 transition-colors hover-hover:bg-[var(--surface-5)]'
|
||||
>
|
||||
{createElement(Icon, { className: 'h-[16px] w-[16px] shrink-0' })}
|
||||
<span className='flex-1 font-base text-[var(--text-body)] text-sm'>
|
||||
@@ -623,7 +623,7 @@ function MothershipErrorDisplay({ data }: { data: MothershipErrorTagData }) {
|
||||
const detail = data.code ? `${data.message} (${data.code})` : data.message
|
||||
|
||||
return (
|
||||
<p className='animate-stream-fade-in font-base text-[13px] text-[var(--text-secondary)] italic leading-[20px]'>
|
||||
<p className='font-base text-[13px] text-[var(--text-secondary)] italic leading-[20px]'>
|
||||
{detail}
|
||||
</p>
|
||||
)
|
||||
@@ -635,7 +635,7 @@ function UsageUpgradeDisplay({ data }: { data: UsageUpgradeTagData }) {
|
||||
const buttonLabel = data.action === 'upgrade_plan' ? 'Upgrade Plan' : 'Increase Limit'
|
||||
|
||||
return (
|
||||
<div className='animate-stream-fade-in rounded-xl border border-amber-300/40 bg-amber-50/50 px-4 py-3 dark:border-amber-500/20 dark:bg-amber-950/20'>
|
||||
<div className='rounded-xl border border-amber-300/40 bg-amber-50/50 px-4 py-3 dark:border-amber-500/20 dark:bg-amber-950/20'>
|
||||
<div className='flex items-center gap-2'>
|
||||
<svg
|
||||
className='h-4 w-4 shrink-0 text-amber-600 dark:text-amber-400'
|
||||
|
||||
@@ -70,7 +70,13 @@ function resolveAgentLabel(key: string): string {
|
||||
}
|
||||
|
||||
function isToolDone(status: ToolCallData['status']): boolean {
|
||||
return status === 'success' || status === 'error' || status === 'cancelled'
|
||||
return (
|
||||
status === 'success' ||
|
||||
status === 'error' ||
|
||||
status === 'cancelled' ||
|
||||
status === 'skipped' ||
|
||||
status === 'rejected'
|
||||
)
|
||||
}
|
||||
|
||||
function isDelegatingTool(tc: NonNullable<ContentBlock['toolCall']>): boolean {
|
||||
@@ -87,6 +93,10 @@ function mapToolStatusToClientState(
|
||||
return ClientToolCallState.error
|
||||
case 'cancelled':
|
||||
return ClientToolCallState.cancelled
|
||||
case 'skipped':
|
||||
return ClientToolCallState.aborted
|
||||
case 'rejected':
|
||||
return ClientToolCallState.rejected
|
||||
default:
|
||||
return ClientToolCallState.executing
|
||||
}
|
||||
@@ -374,7 +384,6 @@ export function MessageContent({
|
||||
const hasSubagentEnded = blocks.some((b) => b.type === 'subagent_end')
|
||||
const showTrailingThinking =
|
||||
isStreaming && !hasTrailingContent && (hasSubagentEnded || allLastGroupToolsDone)
|
||||
const hasStructuredSegments = segments.some((segment) => segment.type !== 'text')
|
||||
const lastOpenSubagentGroupId = [...segments]
|
||||
.reverse()
|
||||
.find(
|
||||
@@ -394,7 +403,6 @@ export function MessageContent({
|
||||
isStreaming={isStreaming}
|
||||
onOptionSelect={onOptionSelect}
|
||||
onWorkspaceResourceSelect={onWorkspaceResourceSelect}
|
||||
smoothStreaming={!hasStructuredSegments}
|
||||
/>
|
||||
)
|
||||
case 'agent_group': {
|
||||
|
||||
@@ -2,6 +2,7 @@ import type { ComponentType, SVGProps } from 'react'
|
||||
import {
|
||||
Asterisk,
|
||||
Blimp,
|
||||
Bug,
|
||||
Calendar,
|
||||
Database,
|
||||
Eye,
|
||||
@@ -44,6 +45,7 @@ const TOOL_ICONS: Record<string, IconComponent> = {
|
||||
create_workflow: Layout,
|
||||
edit_workflow: Pencil,
|
||||
workflow: Hammer,
|
||||
debug: Bug,
|
||||
run: PlayOutline,
|
||||
deploy: Rocket,
|
||||
auth: Integration,
|
||||
|
||||
@@ -10,7 +10,10 @@ import {
|
||||
} from '@/app/workspace/[workspaceId]/home/components/message-content'
|
||||
import { PendingTagIndicator } from '@/app/workspace/[workspaceId]/home/components/message-content/components/special-tags'
|
||||
import { QueuedMessages } from '@/app/workspace/[workspaceId]/home/components/queued-messages'
|
||||
import { UserInput } from '@/app/workspace/[workspaceId]/home/components/user-input'
|
||||
import {
|
||||
UserInput,
|
||||
type UserInputHandle,
|
||||
} from '@/app/workspace/[workspaceId]/home/components/user-input'
|
||||
import { UserMessageContent } from '@/app/workspace/[workspaceId]/home/components/user-message-content'
|
||||
import type {
|
||||
ChatMessage,
|
||||
@@ -36,14 +39,12 @@ interface MothershipChatProps {
|
||||
messageQueue: QueuedMessage[]
|
||||
onRemoveQueuedMessage: (id: string) => void
|
||||
onSendQueuedMessage: (id: string) => Promise<void>
|
||||
onEditQueuedMessage: (id: string) => void
|
||||
onEditQueuedMessage: (id: string) => QueuedMessage | undefined
|
||||
userId?: string
|
||||
chatId?: string
|
||||
onContextAdd?: (context: ChatContext) => void
|
||||
onContextRemove?: (context: ChatContext) => void
|
||||
onWorkspaceResourceSelect?: (resource: MothershipResource) => void
|
||||
editValue?: string
|
||||
onEditValueConsumed?: () => void
|
||||
layout?: 'mothership-view' | 'copilot-view'
|
||||
initialScrollBlocked?: boolean
|
||||
animateInput?: boolean
|
||||
@@ -91,8 +92,6 @@ export function MothershipChat({
|
||||
onContextAdd,
|
||||
onContextRemove,
|
||||
onWorkspaceResourceSelect,
|
||||
editValue,
|
||||
onEditValueConsumed,
|
||||
layout = 'mothership-view',
|
||||
initialScrollBlocked = false,
|
||||
animateInput = false,
|
||||
@@ -106,11 +105,24 @@ export function MothershipChat({
|
||||
})
|
||||
const hasMessages = messages.length > 0
|
||||
const initialScrollDoneRef = useRef(false)
|
||||
const userInputRef = useRef<UserInputHandle>(null)
|
||||
const handleSendQueuedHead = useCallback(() => {
|
||||
const topMessage = messageQueue[0]
|
||||
if (!topMessage) return
|
||||
void onSendQueuedMessage(topMessage.id)
|
||||
}, [messageQueue, onSendQueuedMessage])
|
||||
const handleEditQueued = useCallback(
|
||||
(id: string) => {
|
||||
const msg = onEditQueuedMessage(id)
|
||||
if (msg) userInputRef.current?.loadQueuedMessage(msg)
|
||||
},
|
||||
[onEditQueuedMessage]
|
||||
)
|
||||
const handleEditQueuedTail = useCallback(() => {
|
||||
const tail = messageQueue[messageQueue.length - 1]
|
||||
if (!tail) return
|
||||
handleEditQueued(tail.id)
|
||||
}, [messageQueue, handleEditQueued])
|
||||
|
||||
useLayoutEffect(() => {
|
||||
if (!hasMessages) {
|
||||
@@ -205,9 +217,10 @@ export function MothershipChat({
|
||||
messageQueue={messageQueue}
|
||||
onRemove={onRemoveQueuedMessage}
|
||||
onSendNow={onSendQueuedMessage}
|
||||
onEdit={onEditQueuedMessage}
|
||||
onEdit={handleEditQueued}
|
||||
/>
|
||||
<UserInput
|
||||
ref={userInputRef}
|
||||
onSubmit={onSubmit}
|
||||
isSending={isStreamActive}
|
||||
onStopGeneration={onStopGeneration}
|
||||
@@ -215,9 +228,8 @@ export function MothershipChat({
|
||||
userId={userId}
|
||||
onContextAdd={onContextAdd}
|
||||
onContextRemove={onContextRemove}
|
||||
editValue={editValue}
|
||||
onEditValueConsumed={onEditValueConsumed}
|
||||
onSendQueuedHead={handleSendQueuedHead}
|
||||
onEditQueuedTail={handleEditQueuedTail}
|
||||
/>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
@@ -41,6 +41,12 @@ export function GenericResourceContent({ data }: GenericResourceContentProps) {
|
||||
{entry.status === 'error' && (
|
||||
<span className='ml-auto text-[12px] text-[var(--text-error)]'>Error</span>
|
||||
)}
|
||||
{entry.status === 'skipped' && (
|
||||
<span className='ml-auto text-[12px] text-[var(--text-muted)]'>Skipped</span>
|
||||
)}
|
||||
{entry.status === 'rejected' && (
|
||||
<span className='ml-auto text-[12px] text-[var(--text-muted)]'>Rejected</span>
|
||||
)}
|
||||
</div>
|
||||
{entry.streamingArgs && (
|
||||
<pre className='overflow-x-auto whitespace-pre-wrap break-words font-mono text-[12px] text-[var(--text-body)]'>
|
||||
|
||||
@@ -684,12 +684,10 @@ function EmbeddedLog({ logId }: EmbeddedLogProps) {
|
||||
</div>
|
||||
</div>
|
||||
|
||||
{/* Execution ID */}
|
||||
{/* Run ID */}
|
||||
{log.executionId && (
|
||||
<div className='flex flex-col gap-1.5 rounded-md border border-[var(--border)] bg-[var(--surface-2)] px-2.5 py-2'>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>
|
||||
Execution ID
|
||||
</span>
|
||||
<span className='font-medium text-[var(--text-tertiary)] text-caption'>Run ID</span>
|
||||
<span className='truncate font-medium text-[var(--text-secondary)] text-sm'>
|
||||
{log.executionId}
|
||||
</span>
|
||||
|
||||
@@ -180,6 +180,26 @@ export function ResourceTabs({
|
||||
return () => node.removeEventListener('wheel', handler)
|
||||
}, [])
|
||||
|
||||
useEffect(() => {
|
||||
const node = scrollNodeRef.current
|
||||
if (!node || !activeId) return
|
||||
const tab = node.querySelector<HTMLElement>(`[data-resource-tab-id="${CSS.escape(activeId)}"]`)
|
||||
if (!tab) return
|
||||
// Use bounding rects because the tab's offsetParent is a `position: relative`
|
||||
// wrapper, so `offsetLeft` is relative to that wrapper rather than `node`.
|
||||
const tabRect = tab.getBoundingClientRect()
|
||||
const nodeRect = node.getBoundingClientRect()
|
||||
const tabLeft = tabRect.left - nodeRect.left + node.scrollLeft
|
||||
const tabRight = tabLeft + tabRect.width
|
||||
const viewLeft = node.scrollLeft
|
||||
const viewRight = viewLeft + node.clientWidth
|
||||
if (tabLeft < viewLeft) {
|
||||
node.scrollTo({ left: tabLeft, behavior: 'smooth' })
|
||||
} else if (tabRight > viewRight) {
|
||||
node.scrollTo({ left: tabRight - node.clientWidth, behavior: 'smooth' })
|
||||
}
|
||||
}, [activeId])
|
||||
|
||||
const addResource = useAddChatResource(chatId)
|
||||
const removeResource = useRemoveChatResource(chatId)
|
||||
const reorderResources = useReorderChatResources(chatId)
|
||||
@@ -286,24 +306,9 @@ export function ResourceTabs({
|
||||
if (anchorIdRef.current && removedIds.has(anchorIdRef.current)) {
|
||||
anchorIdRef.current = null
|
||||
}
|
||||
// Serialize mutations so each onMutate sees the cache updated by the prior
|
||||
// one. Continue on individual failures so remaining removals still fire.
|
||||
const persistable = targets.filter((r) => !isEphemeralResource(r))
|
||||
if (persistable.length > 0) {
|
||||
void (async () => {
|
||||
for (const r of persistable) {
|
||||
try {
|
||||
await removeResource.mutateAsync({
|
||||
chatId,
|
||||
resourceType: r.type,
|
||||
resourceId: r.id,
|
||||
})
|
||||
} catch {
|
||||
// Individual failure — the mutation's onError already rolled back
|
||||
// this resource in cache. Remaining removals continue.
|
||||
}
|
||||
}
|
||||
})()
|
||||
for (const r of targets) {
|
||||
if (isEphemeralResource(r)) continue
|
||||
removeResource.mutate({ chatId, resourceType: r.type, resourceId: r.id })
|
||||
}
|
||||
},
|
||||
// eslint-disable-next-line react-hooks/exhaustive-deps
|
||||
|
||||
@@ -1,10 +1,13 @@
|
||||
'use client'
|
||||
|
||||
import { useState } from 'react'
|
||||
import { ArrowUp, ChevronDown, ChevronRight, Pencil, Trash2 } from 'lucide-react'
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import { ArrowUp, ChevronDown, ChevronRight, Paperclip, Pencil, Trash2 } from 'lucide-react'
|
||||
import { Tooltip } from '@/components/emcn'
|
||||
import { UserMessageContent } from '@/app/workspace/[workspaceId]/home/components/user-message-content'
|
||||
import type { QueuedMessage } from '@/app/workspace/[workspaceId]/home/types'
|
||||
|
||||
const NARROW_WIDTH_PX = 320
|
||||
|
||||
interface QueuedMessagesProps {
|
||||
messageQueue: QueuedMessage[]
|
||||
onRemove: (id: string) => void
|
||||
@@ -14,11 +17,29 @@ interface QueuedMessagesProps {
|
||||
|
||||
export function QueuedMessages({ messageQueue, onRemove, onSendNow, onEdit }: QueuedMessagesProps) {
|
||||
const [isExpanded, setIsExpanded] = useState(true)
|
||||
const [isNarrow, setIsNarrow] = useState(false)
|
||||
const roRef = useRef<ResizeObserver | null>(null)
|
||||
|
||||
const containerRef = useCallback((el: HTMLDivElement | null) => {
|
||||
if (roRef.current) {
|
||||
roRef.current.disconnect()
|
||||
roRef.current = null
|
||||
}
|
||||
if (!el) return
|
||||
const ro = new ResizeObserver((entries) => {
|
||||
setIsNarrow(entries[0].contentRect.width < NARROW_WIDTH_PX)
|
||||
})
|
||||
ro.observe(el)
|
||||
roRef.current = ro
|
||||
}, [])
|
||||
|
||||
if (messageQueue.length === 0) return null
|
||||
|
||||
return (
|
||||
<div className='-mb-3 mx-3.5 overflow-hidden rounded-t-[16px] border border-[var(--border-1)] border-b-0 bg-[var(--surface-3)] pb-3'>
|
||||
<div
|
||||
ref={containerRef}
|
||||
className='-mb-3 mx-3.5 overflow-hidden rounded-t-[16px] border border-[var(--border-1)] border-b-0 bg-[var(--surface-3)] pb-3'
|
||||
>
|
||||
<button
|
||||
type='button'
|
||||
onClick={() => setIsExpanded(!isExpanded)}
|
||||
@@ -39,16 +60,41 @@ export function QueuedMessages({ messageQueue, onRemove, onSendNow, onEdit }: Qu
|
||||
{messageQueue.map((msg) => (
|
||||
<div
|
||||
key={msg.id}
|
||||
className='flex items-center gap-2 px-3.5 py-1.5 transition-colors hover-hover:bg-[var(--surface-active)]'
|
||||
className='flex items-center gap-2 py-1.5 pr-2 pl-3.5 transition-colors hover-hover:bg-[var(--surface-active)]'
|
||||
>
|
||||
<div className='flex h-[16px] w-[16px] shrink-0 items-center justify-center'>
|
||||
<div className='h-[10px] w-[10px] rounded-full border-[1.5px] border-[color-mix(in_srgb,var(--text-tertiary)_40%,transparent)]' />
|
||||
</div>
|
||||
|
||||
<div className='min-w-0 flex-1'>
|
||||
<p className='truncate text-[var(--text-primary)] text-small'>{msg.content}</p>
|
||||
<div className='min-w-0 flex-1 overflow-hidden'>
|
||||
<UserMessageContent
|
||||
content={msg.content}
|
||||
contexts={msg.contexts}
|
||||
plainMentions
|
||||
compact
|
||||
/>
|
||||
</div>
|
||||
|
||||
{msg.fileAttachments && msg.fileAttachments.length > 0 && (
|
||||
<span className='inline-flex min-w-0 max-w-[40%] shrink items-center gap-1 rounded-[5px] bg-[var(--surface-5)] px-[5px] py-0.5 text-[var(--text-primary)] text-small'>
|
||||
<Paperclip className='h-[12px] w-[12px] shrink-0 text-[var(--text-icon)]' />
|
||||
{isNarrow ? (
|
||||
<span className='shrink-0 text-[var(--text-secondary)]'>
|
||||
{msg.fileAttachments.length}
|
||||
</span>
|
||||
) : (
|
||||
<>
|
||||
<span className='truncate'>{msg.fileAttachments[0].filename}</span>
|
||||
{msg.fileAttachments.length > 1 && (
|
||||
<span className='shrink-0 text-[var(--text-secondary)]'>
|
||||
+{msg.fileAttachments.length - 1}
|
||||
</span>
|
||||
)}
|
||||
</>
|
||||
)}
|
||||
</span>
|
||||
)}
|
||||
|
||||
<div className='flex shrink-0 items-center gap-0.5'>
|
||||
<Tooltip.Root>
|
||||
<Tooltip.Trigger asChild>
|
||||
|
||||
@@ -1 +1 @@
|
||||
export { UserInput } from './user-input'
|
||||
export { UserInput, type UserInputHandle } from './user-input'
|
||||
|
||||
@@ -1,7 +1,16 @@
|
||||
'use client'
|
||||
|
||||
import type React from 'react'
|
||||
import { useCallback, useEffect, useLayoutEffect, useMemo, useRef, useState } from 'react'
|
||||
import {
|
||||
forwardRef,
|
||||
useCallback,
|
||||
useEffect,
|
||||
useImperativeHandle,
|
||||
useLayoutEffect,
|
||||
useMemo,
|
||||
useRef,
|
||||
useState,
|
||||
} from 'react'
|
||||
import { useParams } from 'next/navigation'
|
||||
import { useSession } from '@/lib/auth/auth-client'
|
||||
import { SIM_RESOURCE_DRAG_TYPE, SIM_RESOURCES_DRAG_TYPE } from '@/lib/copilot/resource-types'
|
||||
@@ -26,6 +35,7 @@ import {
|
||||
import type {
|
||||
FileAttachmentForApi,
|
||||
MothershipResource,
|
||||
QueuedMessage,
|
||||
} from '@/app/workspace/[workspaceId]/home/types'
|
||||
import {
|
||||
useContextManagement,
|
||||
@@ -91,8 +101,6 @@ function getCaretAnchor(
|
||||
|
||||
interface UserInputProps {
|
||||
defaultValue?: string
|
||||
editValue?: string
|
||||
onEditValueConsumed?: () => void
|
||||
onSubmit: (
|
||||
text: string,
|
||||
fileAttachments?: FileAttachmentForApi[],
|
||||
@@ -105,21 +113,28 @@ interface UserInputProps {
|
||||
onContextAdd?: (context: ChatContext) => void
|
||||
onContextRemove?: (context: ChatContext) => void
|
||||
onSendQueuedHead?: () => void
|
||||
onEditQueuedTail?: () => void
|
||||
}
|
||||
|
||||
export function UserInput({
|
||||
defaultValue = '',
|
||||
editValue,
|
||||
onEditValueConsumed,
|
||||
onSubmit,
|
||||
isSending,
|
||||
onStopGeneration,
|
||||
isInitialView = true,
|
||||
userId,
|
||||
onContextAdd,
|
||||
onContextRemove,
|
||||
onSendQueuedHead,
|
||||
}: UserInputProps) {
|
||||
export interface UserInputHandle {
|
||||
loadQueuedMessage: (msg: QueuedMessage) => void
|
||||
}
|
||||
|
||||
export const UserInput = forwardRef<UserInputHandle, UserInputProps>(function UserInput(
|
||||
{
|
||||
defaultValue = '',
|
||||
onSubmit,
|
||||
isSending,
|
||||
onStopGeneration,
|
||||
isInitialView = true,
|
||||
userId,
|
||||
onContextAdd,
|
||||
onContextRemove,
|
||||
onSendQueuedHead,
|
||||
onEditQueuedTail,
|
||||
},
|
||||
ref
|
||||
) {
|
||||
const { workspaceId } = useParams<{ workspaceId: string }>()
|
||||
const { navigateToSettings } = useSettingsNavigation()
|
||||
const { data: workflowsById = {} } = useWorkflowMap(workspaceId)
|
||||
@@ -136,18 +151,6 @@ export function UserInput({
|
||||
setPrevDefaultValue(defaultValue)
|
||||
}
|
||||
|
||||
const [prevEditValue, setPrevEditValue] = useState(editValue)
|
||||
if (editValue && editValue !== prevEditValue) {
|
||||
setPrevEditValue(editValue)
|
||||
setValue(editValue)
|
||||
} else if (!editValue && prevEditValue) {
|
||||
setPrevEditValue(editValue)
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
if (editValue) onEditValueConsumed?.()
|
||||
}, [editValue, onEditValueConsumed])
|
||||
|
||||
const files = useFileAttachments({
|
||||
userId: userId || session?.user?.id,
|
||||
workspaceId,
|
||||
@@ -269,6 +272,8 @@ export function UserInput({
|
||||
contextRef.current = contextManagement
|
||||
const onSendQueuedHeadRef = useRef(onSendQueuedHead)
|
||||
onSendQueuedHeadRef.current = onSendQueuedHead
|
||||
const onEditQueuedTailRef = useRef(onEditQueuedTail)
|
||||
onEditQueuedTailRef.current = onEditQueuedTail
|
||||
const isSendingRef = useRef(isSending)
|
||||
isSendingRef.current = isSending
|
||||
|
||||
@@ -277,6 +282,34 @@ export function UserInput({
|
||||
const atInsertPosRef = useRef<number | null>(null)
|
||||
const pendingCursorRef = useRef<number | null>(null)
|
||||
|
||||
useImperativeHandle(
|
||||
ref,
|
||||
() => ({
|
||||
loadQueuedMessage: (msg: QueuedMessage) => {
|
||||
setValue(msg.content)
|
||||
const restored: AttachedFile[] = (msg.fileAttachments ?? []).map((a) => ({
|
||||
id: a.id,
|
||||
name: a.filename,
|
||||
size: a.size,
|
||||
type: a.media_type,
|
||||
path: a.path ?? '',
|
||||
key: a.key,
|
||||
uploading: false,
|
||||
}))
|
||||
files.restoreAttachedFiles(restored)
|
||||
contextManagement.setSelectedContexts(msg.contexts ?? [])
|
||||
requestAnimationFrame(() => {
|
||||
const textarea = textareaRef.current
|
||||
if (!textarea) return
|
||||
textarea.focus()
|
||||
const end = textarea.value.length
|
||||
textarea.setSelectionRange(end, end)
|
||||
})
|
||||
},
|
||||
}),
|
||||
[files.restoreAttachedFiles, contextManagement.setSelectedContexts, textareaRef]
|
||||
)
|
||||
|
||||
useLayoutEffect(() => {
|
||||
const textarea = textareaRef.current
|
||||
if (!textarea) return
|
||||
@@ -430,6 +463,7 @@ export function UserInput({
|
||||
filename: f.name,
|
||||
media_type: f.type,
|
||||
size: f.size,
|
||||
...(f.path ? { path: f.path } : {}),
|
||||
}))
|
||||
|
||||
onSubmit(
|
||||
@@ -452,6 +486,15 @@ export function UserInput({
|
||||
|
||||
const handleKeyDown = useCallback(
|
||||
(e: React.KeyboardEvent<HTMLTextAreaElement>) => {
|
||||
if (e.key === 'ArrowUp' && !e.shiftKey && !e.metaKey && !e.ctrlKey && !e.altKey) {
|
||||
const isEmpty = valueRef.current.length === 0 && filesRef.current.attachedFiles.length === 0
|
||||
if (isEmpty && onEditQueuedTailRef.current) {
|
||||
e.preventDefault()
|
||||
onEditQueuedTailRef.current()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
if (e.key === 'Enter' && !e.shiftKey && !e.nativeEvent.isComposing) {
|
||||
e.preventDefault()
|
||||
const hasSubmitPayload =
|
||||
@@ -763,4 +806,4 @@ export function UserInput({
|
||||
{files.isDragging && <DropOverlay />}
|
||||
</div>
|
||||
)
|
||||
}
|
||||
})
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user