mirror of
https://github.com/simstudioai/sim.git
synced 2026-03-15 03:00:33 -04:00
Compare commits
18 Commits
feat/short
...
v0.5.103
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
e07e3c34cc | ||
|
|
79bb4e5ad8 | ||
|
|
ee20e119de | ||
|
|
3788660366 | ||
|
|
0d2e6ff31d | ||
|
|
9be75e3633 | ||
|
|
40bab7731a | ||
|
|
4fd0989264 | ||
|
|
67f8a687f6 | ||
|
|
af592349d3 | ||
|
|
0d86ea01f0 | ||
|
|
115f04e989 | ||
|
|
34d92fae89 | ||
|
|
67aa4bb332 | ||
|
|
15ace5e63f | ||
|
|
fdca73679d | ||
|
|
da46a387c9 | ||
|
|
b7e377ec4b |
@@ -1,5 +1,8 @@
|
||||
import type React from 'react'
|
||||
import type { Root } from 'fumadocs-core/page-tree'
|
||||
import { findNeighbour } from 'fumadocs-core/page-tree'
|
||||
import type { ApiPageProps } from 'fumadocs-openapi/ui'
|
||||
import { createAPIPage } from 'fumadocs-openapi/ui'
|
||||
import { Pre } from 'fumadocs-ui/components/codeblock'
|
||||
import defaultMdxComponents from 'fumadocs-ui/mdx'
|
||||
import { DocsBody, DocsDescription, DocsPage, DocsTitle } from 'fumadocs-ui/page'
|
||||
@@ -12,28 +15,75 @@ import { LLMCopyButton } from '@/components/page-actions'
|
||||
import { StructuredData } from '@/components/structured-data'
|
||||
import { CodeBlock } from '@/components/ui/code-block'
|
||||
import { Heading } from '@/components/ui/heading'
|
||||
import { ResponseSection } from '@/components/ui/response-section'
|
||||
import { i18n } from '@/lib/i18n'
|
||||
import { getApiSpecContent, openapi } from '@/lib/openapi'
|
||||
import { type PageData, source } from '@/lib/source'
|
||||
|
||||
const SUPPORTED_LANGUAGES: Set<string> = new Set(i18n.languages)
|
||||
const BASE_URL = 'https://docs.sim.ai'
|
||||
|
||||
function resolveLangAndSlug(params: { slug?: string[]; lang: string }) {
|
||||
const isValidLang = SUPPORTED_LANGUAGES.has(params.lang)
|
||||
const lang = isValidLang ? params.lang : 'en'
|
||||
const slug = isValidLang ? params.slug : [params.lang, ...(params.slug ?? [])]
|
||||
return { lang, slug }
|
||||
}
|
||||
|
||||
const APIPage = createAPIPage(openapi, {
|
||||
playground: { enabled: false },
|
||||
content: {
|
||||
renderOperationLayout: async (slots) => {
|
||||
return (
|
||||
<div className='flex @4xl:flex-row flex-col @4xl:items-start gap-x-6 gap-y-4'>
|
||||
<div className='min-w-0 flex-1'>
|
||||
{slots.header}
|
||||
{slots.apiPlayground}
|
||||
{slots.authSchemes && <div className='api-section-divider'>{slots.authSchemes}</div>}
|
||||
{slots.paremeters}
|
||||
{slots.body && <div className='api-section-divider'>{slots.body}</div>}
|
||||
<ResponseSection>{slots.responses}</ResponseSection>
|
||||
{slots.callbacks}
|
||||
</div>
|
||||
<div className='@4xl:sticky @4xl:top-[calc(var(--fd-docs-row-1,2rem)+1rem)] @4xl:w-[400px]'>
|
||||
{slots.apiExample}
|
||||
</div>
|
||||
</div>
|
||||
)
|
||||
},
|
||||
},
|
||||
})
|
||||
|
||||
export default async function Page(props: { params: Promise<{ slug?: string[]; lang: string }> }) {
|
||||
const params = await props.params
|
||||
const page = source.getPage(params.slug, params.lang)
|
||||
const { lang, slug } = resolveLangAndSlug(params)
|
||||
const page = source.getPage(slug, lang)
|
||||
if (!page) notFound()
|
||||
|
||||
const data = page.data as PageData
|
||||
const MDX = data.body
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const markdownContent = await data.getText('processed')
|
||||
const data = page.data as unknown as PageData & {
|
||||
_openapi?: { method?: string }
|
||||
getAPIPageProps?: () => ApiPageProps
|
||||
}
|
||||
const isOpenAPI = '_openapi' in data && data._openapi != null
|
||||
const isApiReference = slug?.some((s) => s === 'api-reference') ?? false
|
||||
|
||||
const pageTreeRecord = source.pageTree as Record<string, any>
|
||||
const pageTree =
|
||||
pageTreeRecord[params.lang] ?? pageTreeRecord.en ?? Object.values(pageTreeRecord)[0]
|
||||
const neighbours = pageTree ? findNeighbour(pageTree, page.url) : null
|
||||
const pageTreeRecord = source.pageTree as Record<string, Root>
|
||||
const pageTree = pageTreeRecord[lang] ?? pageTreeRecord.en ?? Object.values(pageTreeRecord)[0]
|
||||
const rawNeighbours = pageTree ? findNeighbour(pageTree, page.url) : null
|
||||
const neighbours = isApiReference
|
||||
? {
|
||||
previous: rawNeighbours?.previous?.url.includes('/api-reference/')
|
||||
? rawNeighbours.previous
|
||||
: undefined,
|
||||
next: rawNeighbours?.next?.url.includes('/api-reference/') ? rawNeighbours.next : undefined,
|
||||
}
|
||||
: rawNeighbours
|
||||
|
||||
const generateBreadcrumbs = () => {
|
||||
const breadcrumbs: Array<{ name: string; url: string }> = [
|
||||
{
|
||||
name: 'Home',
|
||||
url: baseUrl,
|
||||
url: BASE_URL,
|
||||
},
|
||||
]
|
||||
|
||||
@@ -41,7 +91,7 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
|
||||
let currentPath = ''
|
||||
|
||||
urlParts.forEach((part, index) => {
|
||||
if (index === 0 && ['en', 'es', 'fr', 'de', 'ja', 'zh'].includes(part)) {
|
||||
if (index === 0 && SUPPORTED_LANGUAGES.has(part)) {
|
||||
currentPath = `/${part}`
|
||||
return
|
||||
}
|
||||
@@ -56,12 +106,12 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
|
||||
if (index === urlParts.length - 1) {
|
||||
breadcrumbs.push({
|
||||
name: data.title,
|
||||
url: `${baseUrl}${page.url}`,
|
||||
url: `${BASE_URL}${page.url}`,
|
||||
})
|
||||
} else {
|
||||
breadcrumbs.push({
|
||||
name: name,
|
||||
url: `${baseUrl}${currentPath}`,
|
||||
url: `${BASE_URL}${currentPath}`,
|
||||
})
|
||||
}
|
||||
})
|
||||
@@ -73,7 +123,6 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
|
||||
|
||||
const CustomFooter = () => (
|
||||
<div className='mt-12'>
|
||||
{/* Navigation links */}
|
||||
<div className='flex items-center justify-between py-8'>
|
||||
{neighbours?.previous ? (
|
||||
<Link
|
||||
@@ -100,10 +149,8 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
|
||||
)}
|
||||
</div>
|
||||
|
||||
{/* Divider line */}
|
||||
<div className='border-border border-t' />
|
||||
|
||||
{/* Social icons */}
|
||||
<div className='flex items-center gap-4 py-6'>
|
||||
<Link
|
||||
href='https://x.com/simdotai'
|
||||
@@ -169,13 +216,69 @@ export default async function Page(props: { params: Promise<{ slug?: string[]; l
|
||||
</div>
|
||||
)
|
||||
|
||||
if (isOpenAPI && data.getAPIPageProps) {
|
||||
const apiProps = data.getAPIPageProps()
|
||||
const apiPageContent = getApiSpecContent(
|
||||
data.title,
|
||||
data.description,
|
||||
apiProps.operations ?? []
|
||||
)
|
||||
|
||||
return (
|
||||
<>
|
||||
<StructuredData
|
||||
title={data.title}
|
||||
description={data.description || ''}
|
||||
url={`${BASE_URL}${page.url}`}
|
||||
lang={lang}
|
||||
breadcrumb={breadcrumbs}
|
||||
/>
|
||||
<DocsPage
|
||||
toc={data.toc}
|
||||
breadcrumb={{
|
||||
enabled: false,
|
||||
}}
|
||||
tableOfContent={{
|
||||
style: 'clerk',
|
||||
enabled: false,
|
||||
}}
|
||||
tableOfContentPopover={{
|
||||
style: 'clerk',
|
||||
enabled: false,
|
||||
}}
|
||||
footer={{
|
||||
enabled: true,
|
||||
component: <CustomFooter />,
|
||||
}}
|
||||
>
|
||||
<div className='api-page-header relative mt-6 sm:mt-0'>
|
||||
<div className='absolute top-1 right-0 flex items-center gap-2'>
|
||||
<div className='hidden sm:flex'>
|
||||
<LLMCopyButton content={apiPageContent} />
|
||||
</div>
|
||||
<PageNavigationArrows previous={neighbours?.previous} next={neighbours?.next} />
|
||||
</div>
|
||||
<DocsTitle>{data.title}</DocsTitle>
|
||||
<DocsDescription>{data.description}</DocsDescription>
|
||||
</div>
|
||||
<DocsBody>
|
||||
<APIPage {...apiProps} />
|
||||
</DocsBody>
|
||||
</DocsPage>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
const MDX = data.body
|
||||
const markdownContent = await data.getText('processed')
|
||||
|
||||
return (
|
||||
<>
|
||||
<StructuredData
|
||||
title={data.title}
|
||||
description={data.description || ''}
|
||||
url={`${baseUrl}${page.url}`}
|
||||
lang={params.lang}
|
||||
url={`${BASE_URL}${page.url}`}
|
||||
lang={lang}
|
||||
breadcrumb={breadcrumbs}
|
||||
/>
|
||||
<DocsPage
|
||||
@@ -252,14 +355,14 @@ export async function generateMetadata(props: {
|
||||
params: Promise<{ slug?: string[]; lang: string }>
|
||||
}) {
|
||||
const params = await props.params
|
||||
const page = source.getPage(params.slug, params.lang)
|
||||
const { lang, slug } = resolveLangAndSlug(params)
|
||||
const page = source.getPage(slug, lang)
|
||||
if (!page) notFound()
|
||||
|
||||
const data = page.data as PageData
|
||||
const baseUrl = 'https://docs.sim.ai'
|
||||
const fullUrl = `${baseUrl}${page.url}`
|
||||
const data = page.data as unknown as PageData
|
||||
const fullUrl = `${BASE_URL}${page.url}`
|
||||
|
||||
const ogImageUrl = `${baseUrl}/api/og?title=${encodeURIComponent(data.title)}`
|
||||
const ogImageUrl = `${BASE_URL}/api/og?title=${encodeURIComponent(data.title)}`
|
||||
|
||||
return {
|
||||
title: data.title,
|
||||
@@ -286,10 +389,10 @@ export async function generateMetadata(props: {
|
||||
url: fullUrl,
|
||||
siteName: 'Sim Documentation',
|
||||
type: 'article',
|
||||
locale: params.lang === 'en' ? 'en_US' : `${params.lang}_${params.lang.toUpperCase()}`,
|
||||
locale: lang === 'en' ? 'en_US' : `${lang}_${lang.toUpperCase()}`,
|
||||
alternateLocale: ['en', 'es', 'fr', 'de', 'ja', 'zh']
|
||||
.filter((lang) => lang !== params.lang)
|
||||
.map((lang) => (lang === 'en' ? 'en_US' : `${lang}_${lang.toUpperCase()}`)),
|
||||
.filter((l) => l !== lang)
|
||||
.map((l) => (l === 'en' ? 'en_US' : `${l}_${l.toUpperCase()}`)),
|
||||
images: [
|
||||
{
|
||||
url: ogImageUrl,
|
||||
@@ -323,13 +426,13 @@ export async function generateMetadata(props: {
|
||||
alternates: {
|
||||
canonical: fullUrl,
|
||||
languages: {
|
||||
'x-default': `${baseUrl}${page.url.replace(`/${params.lang}`, '')}`,
|
||||
en: `${baseUrl}${page.url.replace(`/${params.lang}`, '')}`,
|
||||
es: `${baseUrl}/es${page.url.replace(`/${params.lang}`, '')}`,
|
||||
fr: `${baseUrl}/fr${page.url.replace(`/${params.lang}`, '')}`,
|
||||
de: `${baseUrl}/de${page.url.replace(`/${params.lang}`, '')}`,
|
||||
ja: `${baseUrl}/ja${page.url.replace(`/${params.lang}`, '')}`,
|
||||
zh: `${baseUrl}/zh${page.url.replace(`/${params.lang}`, '')}`,
|
||||
'x-default': `${BASE_URL}${page.url.replace(`/${lang}`, '')}`,
|
||||
en: `${BASE_URL}${page.url.replace(`/${lang}`, '')}`,
|
||||
es: `${BASE_URL}/es${page.url.replace(`/${lang}`, '')}`,
|
||||
fr: `${BASE_URL}/fr${page.url.replace(`/${lang}`, '')}`,
|
||||
de: `${BASE_URL}/de${page.url.replace(`/${lang}`, '')}`,
|
||||
ja: `${BASE_URL}/ja${page.url.replace(`/${lang}`, '')}`,
|
||||
zh: `${BASE_URL}/zh${page.url.replace(`/${lang}`, '')}`,
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
@@ -55,8 +55,11 @@ type LayoutProps = {
|
||||
params: Promise<{ lang: string }>
|
||||
}
|
||||
|
||||
const SUPPORTED_LANGUAGES: Set<string> = new Set(i18n.languages)
|
||||
|
||||
export default async function Layout({ children, params }: LayoutProps) {
|
||||
const { lang } = await params
|
||||
const { lang: rawLang } = await params
|
||||
const lang = SUPPORTED_LANGUAGES.has(rawLang) ? rawLang : 'en'
|
||||
|
||||
const structuredData = {
|
||||
'@context': 'https://schema.org',
|
||||
@@ -107,6 +110,7 @@ export default async function Layout({ children, params }: LayoutProps) {
|
||||
title: <SimLogoFull className='h-7 w-auto' />,
|
||||
}}
|
||||
sidebar={{
|
||||
tabs: false,
|
||||
defaultOpenLevel: 0,
|
||||
collapsible: false,
|
||||
footer: null,
|
||||
|
||||
@@ -1,6 +1,7 @@
|
||||
@import "tailwindcss";
|
||||
@import "fumadocs-ui/css/neutral.css";
|
||||
@import "fumadocs-ui/css/preset.css";
|
||||
@import "fumadocs-openapi/css/preset.css";
|
||||
|
||||
/* Prevent overscroll bounce effect on the page */
|
||||
html,
|
||||
@@ -8,18 +9,12 @@ body {
|
||||
overscroll-behavior: none;
|
||||
}
|
||||
|
||||
/* Reserve scrollbar space to prevent layout jitter between pages */
|
||||
html {
|
||||
scrollbar-gutter: stable;
|
||||
}
|
||||
|
||||
@theme {
|
||||
--color-fd-primary: #33c482; /* Green from Sim logo */
|
||||
--font-geist-sans: var(--font-geist-sans);
|
||||
--font-geist-mono: var(--font-geist-mono);
|
||||
}
|
||||
|
||||
/* Ensure primary color is set in both light and dark modes */
|
||||
:root {
|
||||
--color-fd-primary: #33c482;
|
||||
}
|
||||
|
||||
.dark {
|
||||
--color-fd-primary: #33c482;
|
||||
}
|
||||
|
||||
@@ -34,12 +29,6 @@ body {
|
||||
"Liberation Mono", "Courier New", monospace;
|
||||
}
|
||||
|
||||
/* Target any potential border classes */
|
||||
* {
|
||||
--fd-border-sidebar: transparent !important;
|
||||
}
|
||||
|
||||
/* Override any CSS custom properties for borders */
|
||||
:root {
|
||||
--fd-border: transparent !important;
|
||||
--fd-border-sidebar: transparent !important;
|
||||
@@ -86,7 +75,6 @@ body {
|
||||
[data-sidebar-container],
|
||||
#nd-sidebar {
|
||||
background: transparent !important;
|
||||
background-color: transparent !important;
|
||||
border: none !important;
|
||||
--color-fd-muted: transparent !important;
|
||||
--color-fd-card: transparent !important;
|
||||
@@ -96,9 +84,7 @@ body {
|
||||
aside[data-sidebar],
|
||||
aside#nd-sidebar {
|
||||
background: transparent !important;
|
||||
background-color: transparent !important;
|
||||
border: none !important;
|
||||
border-right: none !important;
|
||||
}
|
||||
|
||||
/* Fumadocs v16: Add sidebar placeholder styling for grid area */
|
||||
@@ -157,7 +143,6 @@ aside#nd-sidebar {
|
||||
#nd-sidebar > div {
|
||||
padding: 0.5rem 12px 12px;
|
||||
background: transparent !important;
|
||||
background-color: transparent !important;
|
||||
}
|
||||
|
||||
/* Override sidebar item styling to match Raindrop */
|
||||
@@ -434,10 +419,6 @@ aside[data-sidebar],
|
||||
#nd-sidebar,
|
||||
#nd-sidebar * {
|
||||
border: none !important;
|
||||
border-right: none !important;
|
||||
border-left: none !important;
|
||||
border-top: none !important;
|
||||
border-bottom: none !important;
|
||||
}
|
||||
|
||||
/* Override fumadocs background colors for sidebar */
|
||||
@@ -447,7 +428,6 @@ aside[data-sidebar],
|
||||
--color-fd-muted: transparent !important;
|
||||
--color-fd-secondary: transparent !important;
|
||||
background: transparent !important;
|
||||
background-color: transparent !important;
|
||||
}
|
||||
|
||||
/* Force normal text flow in sidebar */
|
||||
@@ -564,16 +544,682 @@ main[data-main] {
|
||||
padding-top: 1.5rem !important;
|
||||
}
|
||||
|
||||
/* Override Fumadocs default content padding */
|
||||
article[data-content],
|
||||
div[data-content] {
|
||||
padding-top: 1.5rem !important;
|
||||
}
|
||||
|
||||
/* Remove any unwanted borders/outlines from video elements */
|
||||
/* Remove any unwanted outlines from video elements */
|
||||
video {
|
||||
outline: none !important;
|
||||
border-style: solid !important;
|
||||
}
|
||||
|
||||
/* API Reference Pages — Mintlify-style overrides */
|
||||
|
||||
/* OpenAPI pages: span main + TOC grid columns for wide two-column layout.
|
||||
The grid has columns: spacer | sidebar | main | toc | spacer.
|
||||
By spanning columns 3-4, the article fills both main and toc areas,
|
||||
while the grid structure stays identical to non-OpenAPI pages (no jitter). */
|
||||
#nd-page:has(.api-page-header) {
|
||||
grid-column: 3 / span 2 !important;
|
||||
max-width: 1400px !important;
|
||||
}
|
||||
|
||||
/* Hide the empty TOC aside on OpenAPI pages so it doesn't overlay content */
|
||||
#nd-docs-layout:has(#nd-page .api-page-header) #nd-toc {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Hide the default "Response Body" heading rendered by fumadocs-openapi */
|
||||
.response-section-wrapper > .response-section-content > h2,
|
||||
.response-section-wrapper > .response-section-content > h3 {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Hide default accordion triggers (status code rows) — we show our own dropdown */
|
||||
.response-section-wrapper [data-orientation="vertical"] > [data-state] > h3 {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Ensure API reference pages use the same font as the rest of the docs */
|
||||
#nd-page:has(.api-page-header),
|
||||
#nd-page:has(.api-page-header) h2,
|
||||
#nd-page:has(.api-page-header) h3,
|
||||
#nd-page:has(.api-page-header) h4,
|
||||
#nd-page:has(.api-page-header) p,
|
||||
#nd-page:has(.api-page-header) span,
|
||||
#nd-page:has(.api-page-header) div,
|
||||
#nd-page:has(.api-page-header) label,
|
||||
#nd-page:has(.api-page-header) button {
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, -apple-system, BlinkMacSystemFont,
|
||||
"Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif;
|
||||
}
|
||||
|
||||
/* Method badge pills in page content — colored background pills */
|
||||
#nd-page span.font-mono.font-medium[class*="text-green"] {
|
||||
background-color: rgb(220 252 231 / 0.6);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
html.dark #nd-page span.font-mono.font-medium[class*="text-green"] {
|
||||
background-color: rgb(34 197 94 / 0.15);
|
||||
}
|
||||
|
||||
#nd-page span.font-mono.font-medium[class*="text-blue"] {
|
||||
background-color: rgb(219 234 254 / 0.6);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
html.dark #nd-page span.font-mono.font-medium[class*="text-blue"] {
|
||||
background-color: rgb(59 130 246 / 0.15);
|
||||
}
|
||||
|
||||
#nd-page span.font-mono.font-medium[class*="text-orange"] {
|
||||
background-color: rgb(255 237 213 / 0.6);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
html.dark #nd-page span.font-mono.font-medium[class*="text-orange"] {
|
||||
background-color: rgb(249 115 22 / 0.15);
|
||||
}
|
||||
|
||||
#nd-page span.font-mono.font-medium[class*="text-red"] {
|
||||
background-color: rgb(254 226 226 / 0.6);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.75rem;
|
||||
}
|
||||
html.dark #nd-page span.font-mono.font-medium[class*="text-red"] {
|
||||
background-color: rgb(239 68 68 / 0.15);
|
||||
}
|
||||
|
||||
/* Sidebar links with method badges — flex for vertical centering */
|
||||
#nd-sidebar a:has(span.font-mono.font-medium) {
|
||||
display: flex !important;
|
||||
align-items: center !important;
|
||||
gap: 6px;
|
||||
}
|
||||
|
||||
/* Sidebar method badges — ensure proper inline flex display */
|
||||
#nd-sidebar a span.font-mono.font-medium {
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
justify-content: center;
|
||||
min-width: 2.25rem;
|
||||
font-size: 10px !important;
|
||||
line-height: 1 !important;
|
||||
padding: 2.5px 4px;
|
||||
border-radius: 3px;
|
||||
flex-shrink: 0;
|
||||
}
|
||||
|
||||
/* Sidebar GET badges */
|
||||
#nd-sidebar a span.font-mono.font-medium[class*="text-green"] {
|
||||
background-color: rgb(220 252 231 / 0.6);
|
||||
}
|
||||
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-green"] {
|
||||
background-color: rgb(34 197 94 / 0.15);
|
||||
}
|
||||
|
||||
/* Sidebar POST badges */
|
||||
#nd-sidebar a span.font-mono.font-medium[class*="text-blue"] {
|
||||
background-color: rgb(219 234 254 / 0.6);
|
||||
}
|
||||
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-blue"] {
|
||||
background-color: rgb(59 130 246 / 0.15);
|
||||
}
|
||||
|
||||
/* Sidebar PUT badges */
|
||||
#nd-sidebar a span.font-mono.font-medium[class*="text-orange"] {
|
||||
background-color: rgb(255 237 213 / 0.6);
|
||||
}
|
||||
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-orange"] {
|
||||
background-color: rgb(249 115 22 / 0.15);
|
||||
}
|
||||
|
||||
/* Sidebar DELETE badges */
|
||||
#nd-sidebar a span.font-mono.font-medium[class*="text-red"] {
|
||||
background-color: rgb(254 226 226 / 0.6);
|
||||
}
|
||||
html.dark #nd-sidebar a span.font-mono.font-medium[class*="text-red"] {
|
||||
background-color: rgb(239 68 68 / 0.15);
|
||||
}
|
||||
|
||||
/* Code block containers — match regular docs styling */
|
||||
#nd-page:has(.api-page-header) figure.shiki {
|
||||
border-radius: 0.75rem !important;
|
||||
background-color: var(--color-fd-card) !important;
|
||||
}
|
||||
|
||||
/* Hide "Filter Properties" search bar everywhere — main page and popovers */
|
||||
input[placeholder="Filter Properties"] {
|
||||
display: none !important;
|
||||
}
|
||||
div:has(> input[placeholder="Filter Properties"]) {
|
||||
display: none !important;
|
||||
}
|
||||
/* Remove top border on first visible property after hidden Filter Properties */
|
||||
div:has(> input[placeholder="Filter Properties"]) + .text-sm.border-t {
|
||||
border-top: none !important;
|
||||
}
|
||||
|
||||
/* Hide "TypeScript Definitions" copy panel on API pages */
|
||||
#nd-page:has(.api-page-header) div.not-prose.rounded-xl.border.p-3.mb-4 {
|
||||
display: none !important;
|
||||
}
|
||||
#nd-page:has(.api-page-header) div.not-prose.rounded-xl.border.p-3:has(> div > p.font-medium) {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Hide info tags (Format, Default, etc.) everywhere — main page and popovers */
|
||||
div.flex.flex-row.gap-2.flex-wrap.not-prose:has(> div.bg-fd-secondary) {
|
||||
display: none !important;
|
||||
}
|
||||
div.flex.flex-row.items-start.bg-fd-secondary.border.rounded-lg.text-xs {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Method+path bar — cleaner, lighter styling like Gumloop.
|
||||
Override bg-fd-card CSS variable directly for reliability. */
|
||||
#nd-page:has(.api-page-header) div.flex.flex-row.items-center.rounded-xl.border.not-prose {
|
||||
--color-fd-card: rgb(249 250 251) !important;
|
||||
background-color: rgb(249 250 251) !important;
|
||||
border-color: rgb(229 231 235) !important;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose {
|
||||
--color-fd-card: rgb(24 24 27) !important;
|
||||
background-color: rgb(24 24 27) !important;
|
||||
border-color: rgb(63 63 70) !important;
|
||||
}
|
||||
/* Method badge inside path bar — cleaner sans-serif, softer colors */
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
span.font-mono.font-medium {
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif !important;
|
||||
font-weight: 600 !important;
|
||||
font-size: 0.6875rem !important;
|
||||
letter-spacing: 0.025em;
|
||||
text-transform: uppercase;
|
||||
}
|
||||
/* POST — softer blue */
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
span.font-mono.font-medium[class*="text-blue"] {
|
||||
color: rgb(37 99 235) !important;
|
||||
background-color: rgb(219 234 254 / 0.7) !important;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
span.font-mono.font-medium[class*="text-blue"] {
|
||||
color: rgb(96 165 250) !important;
|
||||
background-color: rgb(59 130 246 / 0.15) !important;
|
||||
}
|
||||
/* GET — softer green */
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
span.font-mono.font-medium[class*="text-green"] {
|
||||
color: rgb(22 163 74) !important;
|
||||
background-color: rgb(220 252 231 / 0.7) !important;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
span.font-mono.font-medium[class*="text-green"] {
|
||||
color: rgb(74 222 128) !important;
|
||||
background-color: rgb(34 197 94 / 0.15) !important;
|
||||
}
|
||||
|
||||
/* Path text inside method+path bar — monospace, bright like Gumloop */
|
||||
#nd-page:has(.api-page-header) div.flex.flex-row.items-center.rounded-xl.border.not-prose code {
|
||||
color: rgb(55 65 81) !important;
|
||||
background: none !important;
|
||||
border: none !important;
|
||||
padding: 0 !important;
|
||||
font-size: 0.8125rem !important;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.flex.flex-row.items-center.rounded-xl.border.not-prose
|
||||
code {
|
||||
color: rgb(229 231 235) !important;
|
||||
}
|
||||
|
||||
/* Inline code in API pages — neutral color instead of red.
|
||||
Exclude code inside the method+path bar (handled above). */
|
||||
#nd-page:has(.api-page-header) .prose :not(pre) > code {
|
||||
color: rgb(79 70 229) !important;
|
||||
}
|
||||
html.dark #nd-page:has(.api-page-header) .prose :not(pre) > code {
|
||||
color: rgb(165 180 252) !important;
|
||||
}
|
||||
|
||||
/* Response Section — custom dropdown-based rendering (Mintlify style) */
|
||||
|
||||
/* Hide divider lines between accordion items */
|
||||
.response-section-wrapper [data-orientation="vertical"].divide-y > * {
|
||||
border-top-width: 0 !important;
|
||||
border-bottom-width: 0 !important;
|
||||
}
|
||||
.response-section-wrapper [data-orientation="vertical"].divide-y {
|
||||
border-top: none !important;
|
||||
}
|
||||
|
||||
/* Remove content type labels inside accordion items (we show one in the header) */
|
||||
.response-section-wrapper [data-orientation="vertical"] p.not-prose:has(code.text-xs) {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Hide the top-level response description (e.g. "Execution was successfully cancelled.")
|
||||
but NOT field descriptions inside Schema which also use prose-no-margin.
|
||||
The response description is a direct child of AccordionContent (role=region) with mb-2. */
|
||||
.response-section-wrapper [data-orientation="vertical"] [role="region"] > .prose-no-margin.mb-2,
|
||||
.response-section-wrapper
|
||||
[data-orientation="vertical"]
|
||||
[role="region"]
|
||||
> div
|
||||
> .prose-no-margin.mb-2 {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Remove left padding on accordion content so it aligns with Path Parameters */
|
||||
.response-section-wrapper [data-orientation="vertical"] [role="region"] {
|
||||
padding-inline-start: 0 !important;
|
||||
}
|
||||
|
||||
/* Response section header */
|
||||
.response-section-header {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 1rem;
|
||||
margin-top: 1.75rem;
|
||||
margin-bottom: 0.5rem;
|
||||
}
|
||||
|
||||
.response-section-title {
|
||||
font-size: 1.5rem;
|
||||
font-weight: 600;
|
||||
margin: 0;
|
||||
color: var(--color-fd-foreground);
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, -apple-system, sans-serif;
|
||||
}
|
||||
|
||||
.response-section-meta {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.75rem;
|
||||
margin-left: auto;
|
||||
}
|
||||
|
||||
/* Status code dropdown */
|
||||
.response-section-dropdown-wrapper {
|
||||
position: relative;
|
||||
}
|
||||
|
||||
.response-section-dropdown-trigger {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
gap: 0.25rem;
|
||||
padding: 0.125rem 0.25rem;
|
||||
font-size: 0.875rem;
|
||||
font-weight: 500;
|
||||
color: var(--color-fd-muted-foreground);
|
||||
background: none;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
border-radius: 0.25rem;
|
||||
transition: color 0.15s;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
.response-section-dropdown-trigger:hover {
|
||||
color: var(--color-fd-foreground);
|
||||
}
|
||||
|
||||
.response-section-chevron {
|
||||
width: 0.75rem;
|
||||
height: 0.75rem;
|
||||
transition: transform 0.15s;
|
||||
}
|
||||
.response-section-chevron-open {
|
||||
transform: rotate(180deg);
|
||||
}
|
||||
|
||||
.response-section-dropdown-menu {
|
||||
position: absolute;
|
||||
top: calc(100% + 0.25rem);
|
||||
left: 0;
|
||||
z-index: 50;
|
||||
min-width: 5rem;
|
||||
background-color: white;
|
||||
border: 1px solid rgb(229 231 235);
|
||||
border-radius: 0.5rem;
|
||||
box-shadow:
|
||||
0 4px 6px -1px rgb(0 0 0 / 0.1),
|
||||
0 2px 4px -2px rgb(0 0 0 / 0.1);
|
||||
padding: 0.25rem;
|
||||
overflow: hidden;
|
||||
}
|
||||
html.dark .response-section-dropdown-menu {
|
||||
background-color: rgb(24 24 27);
|
||||
border-color: rgb(63 63 70);
|
||||
box-shadow: 0 4px 6px -1px rgb(0 0 0 / 0.3);
|
||||
}
|
||||
|
||||
.response-section-dropdown-item {
|
||||
display: flex;
|
||||
align-items: center;
|
||||
justify-content: space-between;
|
||||
width: 100%;
|
||||
padding: 0.375rem 0.5rem;
|
||||
font-size: 0.875rem;
|
||||
color: var(--color-fd-muted-foreground);
|
||||
background: none;
|
||||
border: none;
|
||||
cursor: pointer;
|
||||
border-radius: 0.25rem;
|
||||
transition:
|
||||
background-color 0.1s,
|
||||
color 0.1s;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
.response-section-dropdown-item:hover {
|
||||
background-color: rgb(243 244 246);
|
||||
color: var(--color-fd-foreground);
|
||||
}
|
||||
html.dark .response-section-dropdown-item:hover {
|
||||
background-color: rgb(39 39 42);
|
||||
}
|
||||
.response-section-dropdown-item-selected {
|
||||
color: var(--color-fd-foreground);
|
||||
}
|
||||
|
||||
.response-section-check {
|
||||
width: 0.875rem;
|
||||
height: 0.875rem;
|
||||
}
|
||||
|
||||
.response-section-content-type {
|
||||
font-size: 0.875rem;
|
||||
color: var(--color-fd-muted-foreground);
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
|
||||
/* Response schema container — remove border to match Path Parameters style */
|
||||
.response-section-wrapper [data-orientation="vertical"] .border.px-3.py-2.rounded-lg {
|
||||
border: none !important;
|
||||
padding: 0 !important;
|
||||
border-radius: 0 !important;
|
||||
background-color: transparent;
|
||||
}
|
||||
|
||||
/* Property row — reorder: name (1) → type badge (2) → required badge (3) */
|
||||
#nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose {
|
||||
display: flex;
|
||||
flex-wrap: wrap;
|
||||
align-items: center;
|
||||
}
|
||||
|
||||
/* Name span — order 1 */
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.font-medium.font-mono.text-fd-primary {
|
||||
order: 1;
|
||||
}
|
||||
|
||||
/* Type badge — order 2, grey pill like Mintlify */
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.text-sm.font-mono.text-fd-muted-foreground {
|
||||
order: 2;
|
||||
background-color: rgb(240 240 243);
|
||||
color: rgb(100 100 110);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.text-sm.font-mono.text-fd-muted-foreground {
|
||||
background-color: rgb(39 39 42);
|
||||
color: rgb(212 212 216);
|
||||
}
|
||||
|
||||
/* Hide the "*" inside the name span — we'll add "required" as a ::after on the flex row */
|
||||
#nd-page:has(.api-page-header) span.font-medium.font-mono.text-fd-primary > span.text-red-400 {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Required badge — order 3, light red pill */
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose:has(span.text-red-400)::after {
|
||||
content: "required";
|
||||
order: 3;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
background-color: rgb(254 235 235);
|
||||
color: rgb(220 38 38);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose:has(span.text-red-400)::after {
|
||||
background-color: rgb(127 29 29 / 0.2);
|
||||
color: rgb(252 165 165);
|
||||
}
|
||||
|
||||
/* Optional "?" indicator — hide it */
|
||||
#nd-page:has(.api-page-header)
|
||||
span.font-medium.font-mono.text-fd-primary
|
||||
> span.text-fd-muted-foreground {
|
||||
display: none;
|
||||
}
|
||||
|
||||
/* Hide the auth scheme type label (e.g. "apiKey") next to Authorization heading */
|
||||
#nd-page:has(.api-page-header) .flex.items-start.justify-between.gap-2 > div.not-prose {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Auth property — replace "<token>" with "string" badge, add "header" and "required" badges.
|
||||
Auth properties use my-4 (vs py-4 for regular properties). */
|
||||
|
||||
/* Auth property flex row — name: order 1, type: order 2, ::before "header": order 3, ::after "required": order 4 */
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.font-medium.font-mono.text-fd-primary {
|
||||
order: 1;
|
||||
}
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.text-sm.font-mono.text-fd-muted-foreground {
|
||||
order: 2;
|
||||
font-size: 0;
|
||||
padding: 0 !important;
|
||||
background: none !important;
|
||||
line-height: 0;
|
||||
}
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.text-sm.font-mono.text-fd-muted-foreground::after {
|
||||
content: "string";
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
background-color: rgb(240 240 243);
|
||||
color: rgb(100 100 110);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span.text-sm.font-mono.text-fd-muted-foreground::after {
|
||||
background-color: rgb(39 39 42);
|
||||
color: rgb(212 212 216);
|
||||
}
|
||||
|
||||
/* "header" badge via ::before on the auth flex row */
|
||||
#nd-page:has(.api-page-header) div.my-4 > .flex.flex-wrap.items-center.gap-3.not-prose::before {
|
||||
content: "header";
|
||||
order: 3;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
background-color: rgb(240 240 243);
|
||||
color: rgb(100 100 110);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose::before {
|
||||
background-color: rgb(39 39 42);
|
||||
color: rgb(212 212 216);
|
||||
}
|
||||
|
||||
/* "required" badge via ::after on the auth flex row — light red pill */
|
||||
#nd-page:has(.api-page-header) div.my-4 > .flex.flex-wrap.items-center.gap-3.not-prose::after {
|
||||
content: "required";
|
||||
order: 4;
|
||||
display: inline-flex;
|
||||
align-items: center;
|
||||
background-color: rgb(254 235 235);
|
||||
color: rgb(220 38 38);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
div.my-4
|
||||
> .flex.flex-wrap.items-center.gap-3.not-prose::after {
|
||||
background-color: rgb(127 29 29 / 0.2);
|
||||
color: rgb(252 165 165);
|
||||
}
|
||||
|
||||
/* Hide "In: header" text below auth property — redundant with the header badge */
|
||||
#nd-page:has(.api-page-header) div.my-4 .prose-no-margin p:has(> code) {
|
||||
display: none !important;
|
||||
}
|
||||
|
||||
/* Section dividers — bottom border after Authorization and Body sections. */
|
||||
.api-section-divider {
|
||||
padding-bottom: 0.5rem;
|
||||
border-bottom: 1px solid rgb(229 231 235 / 0.6);
|
||||
}
|
||||
html.dark .api-section-divider {
|
||||
border-bottom-color: rgb(255 255 255 / 0.07);
|
||||
}
|
||||
|
||||
/* Property rows — breathing room like Mintlify.
|
||||
Regular properties use border-t py-4; auth properties use border-t my-4. */
|
||||
#nd-page:has(.api-page-header) .text-sm.border-t.py-4 {
|
||||
padding-top: 1.25rem !important;
|
||||
padding-bottom: 1.25rem !important;
|
||||
}
|
||||
#nd-page:has(.api-page-header) .text-sm.border-t.my-4 {
|
||||
margin-top: 1.25rem !important;
|
||||
margin-bottom: 1.25rem !important;
|
||||
padding-top: 1.25rem;
|
||||
}
|
||||
|
||||
/* Divider lines between fields — very subtle like Mintlify */
|
||||
#nd-page:has(.api-page-header) .text-sm.border-t {
|
||||
border-color: rgb(229 231 235 / 0.6);
|
||||
}
|
||||
html.dark #nd-page:has(.api-page-header) .text-sm.border-t {
|
||||
border-color: rgb(255 255 255 / 0.07);
|
||||
}
|
||||
|
||||
/* Body/Callback section "application/json" label — remove inline code styling */
|
||||
#nd-page:has(.api-page-header) .flex.gap-2.items-center.justify-between p.not-prose code.text-xs,
|
||||
#nd-page:has(.api-page-header) .flex.justify-between.gap-2.items-end p.not-prose code.text-xs {
|
||||
background: none !important;
|
||||
border: none !important;
|
||||
padding: 0 !important;
|
||||
color: var(--color-fd-muted-foreground) !important;
|
||||
font-size: 0.875rem !important;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif !important;
|
||||
}
|
||||
|
||||
/* Object/array type triggers in property rows — order 2 + badge chip styling */
|
||||
#nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose > button,
|
||||
#nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose > span:has(> button) {
|
||||
order: 2;
|
||||
background-color: rgb(240 240 243);
|
||||
color: rgb(100 100 110);
|
||||
padding: 0.125rem 0.5rem;
|
||||
border-radius: 0.375rem;
|
||||
font-size: 0.6875rem;
|
||||
line-height: 1.25rem;
|
||||
font-weight: 500;
|
||||
font-family: var(--font-geist-sans), ui-sans-serif, system-ui, sans-serif;
|
||||
}
|
||||
html.dark #nd-page:has(.api-page-header) .flex.flex-wrap.items-center.gap-3.not-prose > button,
|
||||
html.dark
|
||||
#nd-page:has(.api-page-header)
|
||||
.flex.flex-wrap.items-center.gap-3.not-prose
|
||||
> span:has(> button) {
|
||||
background-color: rgb(39 39 42);
|
||||
color: rgb(212 212 216);
|
||||
}
|
||||
|
||||
/* Section headings (Authorization, Path Parameters, etc.) — consistent top spacing */
|
||||
#nd-page:has(.api-page-header) .min-w-0.flex-1 h2 {
|
||||
margin-top: 1.75rem !important;
|
||||
margin-bottom: 0.25rem !important;
|
||||
}
|
||||
|
||||
/* Code examples in right column — wrap long lines instead of horizontal scroll */
|
||||
#nd-page:has(.api-page-header) pre {
|
||||
white-space: pre-wrap !important;
|
||||
word-break: break-all !important;
|
||||
}
|
||||
#nd-page:has(.api-page-header) pre code {
|
||||
width: 100% !important;
|
||||
word-break: break-all !important;
|
||||
overflow-wrap: break-word !important;
|
||||
}
|
||||
|
||||
/* API page header — constrain title/copy-page to left content column, not full width.
|
||||
Only applies on OpenAPI pages (which have the two-column layout). */
|
||||
@media (min-width: 1280px) {
|
||||
.api-page-header {
|
||||
max-width: calc(100% - 400px - 1.5rem);
|
||||
}
|
||||
}
|
||||
|
||||
/* Footer navigation — constrain to left content column on OpenAPI pages only.
|
||||
Target pages that contain the two-column layout via :has() selector. */
|
||||
#nd-page:has(.api-page-header) > div:last-child {
|
||||
max-width: calc(100% - 400px - 1.5rem);
|
||||
}
|
||||
@media (max-width: 1024px) {
|
||||
#nd-page:has(.api-page-header) > div:last-child {
|
||||
max-width: 100%;
|
||||
}
|
||||
}
|
||||
|
||||
/* Tailwind v4 content sources */
|
||||
|
||||
@@ -1,21 +0,0 @@
|
||||
import type { BaseLayoutProps } from 'fumadocs-ui/layouts/shared'
|
||||
|
||||
/**
|
||||
* Shared layout configurations
|
||||
*
|
||||
* you can customise layouts individually from:
|
||||
* Home Layout: app/(home)/layout.tsx
|
||||
* Docs Layout: app/docs/layout.tsx
|
||||
*/
|
||||
export const baseOptions: BaseLayoutProps = {
|
||||
nav: {
|
||||
title: (
|
||||
<>
|
||||
<svg width='24' height='24' xmlns='http://www.w3.org/2000/svg' aria-label='Logo'>
|
||||
<circle cx={12} cy={12} r={12} fill='currentColor' />
|
||||
</svg>
|
||||
My App
|
||||
</>
|
||||
),
|
||||
},
|
||||
}
|
||||
@@ -52,15 +52,26 @@ export function SidebarItem({ item }: { item: Item }) {
|
||||
)
|
||||
}
|
||||
|
||||
function isApiReferenceFolder(node: Folder): boolean {
|
||||
if (node.index?.url.includes('/api-reference/')) return true
|
||||
for (const child of node.children) {
|
||||
if (child.type === 'page' && child.url.includes('/api-reference/')) return true
|
||||
if (child.type === 'folder' && isApiReferenceFolder(child)) return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
export function SidebarFolder({ item, children }: { item: Folder; children: ReactNode }) {
|
||||
const pathname = usePathname()
|
||||
const hasActiveChild = checkHasActiveChild(item, pathname)
|
||||
const isApiRef = isApiReferenceFolder(item)
|
||||
const isOnApiRefPage = stripLangPrefix(pathname).startsWith('/api-reference')
|
||||
const hasChildren = item.children.length > 0
|
||||
const [open, setOpen] = useState(hasActiveChild)
|
||||
const [open, setOpen] = useState(hasActiveChild || (isApiRef && isOnApiRefPage))
|
||||
|
||||
useEffect(() => {
|
||||
setOpen(hasActiveChild)
|
||||
}, [hasActiveChild])
|
||||
setOpen(hasActiveChild || (isApiRef && isOnApiRefPage))
|
||||
}, [hasActiveChild, isApiRef, isOnApiRefPage])
|
||||
|
||||
const active = item.index ? isActive(item.index.url, pathname, false) : false
|
||||
|
||||
@@ -157,16 +168,18 @@ export function SidebarFolder({ item, children }: { item: Folder; children: Reac
|
||||
{hasChildren && (
|
||||
<div
|
||||
className={cn(
|
||||
'overflow-hidden transition-all duration-200 ease-in-out',
|
||||
open ? 'max-h-[10000px] opacity-100' : 'max-h-0 opacity-0'
|
||||
'grid transition-[grid-template-rows,opacity] duration-200 ease-in-out',
|
||||
open ? 'grid-rows-[1fr] opacity-100' : 'grid-rows-[0fr] opacity-0'
|
||||
)}
|
||||
>
|
||||
{/* Mobile: simple indent */}
|
||||
<div className='ml-4 flex flex-col gap-0.5 lg:hidden'>{children}</div>
|
||||
{/* Desktop: styled with border */}
|
||||
<ul className='mt-0.5 ml-2 hidden space-y-[0.0625rem] border-gray-200/60 border-l pl-2.5 lg:block dark:border-gray-700/60'>
|
||||
{children}
|
||||
</ul>
|
||||
<div className='overflow-hidden'>
|
||||
{/* Mobile: simple indent */}
|
||||
<div className='ml-4 flex flex-col gap-0.5 lg:hidden'>{children}</div>
|
||||
{/* Desktop: styled with border */}
|
||||
<ul className='mt-0.5 ml-2 hidden space-y-[0.0625rem] border-gray-200/60 border-l pl-2.5 lg:block dark:border-gray-700/60'>
|
||||
{children}
|
||||
</ul>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
|
||||
@@ -1,12 +1,9 @@
|
||||
'use client'
|
||||
|
||||
import { useState } from 'react'
|
||||
import { ArrowRight, ChevronRight } from 'lucide-react'
|
||||
import Link from 'next/link'
|
||||
|
||||
export function TOCFooter() {
|
||||
const [isHovered, setIsHovered] = useState(false)
|
||||
|
||||
return (
|
||||
<div className='sticky bottom-0 mt-6'>
|
||||
<div className='flex flex-col gap-2 rounded-lg border border-border bg-secondary p-6 text-sm'>
|
||||
@@ -21,18 +18,19 @@ export function TOCFooter() {
|
||||
href='https://sim.ai/signup'
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
onMouseEnter={() => setIsHovered(true)}
|
||||
onMouseLeave={() => setIsHovered(false)}
|
||||
className='group mt-2 inline-flex h-8 w-fit items-center justify-center gap-1 whitespace-nowrap rounded-[10px] border border-[#2AAD6C] bg-gradient-to-b from-[#3ED990] to-[#2AAD6C] px-3 pr-[10px] pl-[12px] font-medium text-sm text-white shadow-[inset_0_2px_4px_0_#5EE8A8] outline-none transition-all hover:shadow-lg focus-visible:border-ring focus-visible:ring-[3px] focus-visible:ring-ring/50'
|
||||
aria-label='Get started with Sim - Sign up for free'
|
||||
>
|
||||
<span>Get started</span>
|
||||
<span className='inline-flex transition-transform duration-200 group-hover:translate-x-0.5'>
|
||||
{isHovered ? (
|
||||
<ArrowRight className='h-4 w-4' aria-hidden='true' />
|
||||
) : (
|
||||
<ChevronRight className='h-4 w-4' aria-hidden='true' />
|
||||
)}
|
||||
<span className='relative inline-flex h-4 w-4 transition-transform duration-200 group-hover:translate-x-0.5'>
|
||||
<ChevronRight
|
||||
className='absolute inset-0 h-4 w-4 transition-opacity duration-200 group-hover:opacity-0'
|
||||
aria-hidden='true'
|
||||
/>
|
||||
<ArrowRight
|
||||
className='absolute inset-0 h-4 w-4 opacity-0 transition-opacity duration-200 group-hover:opacity-100'
|
||||
aria-hidden='true'
|
||||
/>
|
||||
</span>
|
||||
</Link>
|
||||
</div>
|
||||
|
||||
@@ -1209,6 +1209,17 @@ export function AlgoliaIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function AmplitudeIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 49 49'>
|
||||
<path
|
||||
fill='#FFFFFF'
|
||||
d='M23.4,15.3c0.6,1.8,1.2,4.1,1.9,6.7c-2.6,0-5.3-0.1-7.8-0.1h-1.3c1.5-5.7,3.2-10.1,4.6-11.1 c0.1-0.1,0.2-0.1,0.4-0.1c0.2,0,0.3,0.1,0.5,0.3C21.9,11.5,22.5,12.7,23.4,15.3z M49,24.5C49,38,38,49,24.5,49S0,38,0,24.5 S11,0,24.5,0S49,11,49,24.5z M42.7,23.9c0-0.6-0.4-1.2-1-1.3l0,0l0,0l0,0c-0.1,0-0.1,0-0.2,0h-0.2c-4.1-0.3-8.4-0.4-12.4-0.5l0,0 C27,14.8,24.5,7.4,21.3,7.4c-3,0-5.8,4.9-8.2,14.5c-1.7,0-3.2,0-4.6-0.1c-0.1,0-0.2,0-0.2,0c-0.3,0-0.5,0-0.5,0 c-0.8,0.1-1.4,0.9-1.4,1.7c0,0.8,0.6,1.6,1.5,1.7l0,0h4.6c-0.4,1.9-0.8,3.8-1.1,5.6l-0.1,0.8l0,0c0,0.6,0.5,1.1,1.1,1.1 c0.4,0,0.8-0.2,1-0.5l0,0l2.2-7.1h10.7c0.8,3.1,1.7,6.3,2.8,9.3c0.6,1.6,2,5.4,4.4,5.4l0,0c3.6,0,5-5.8,5.9-9.6 c0.2-0.8,0.4-1.5,0.5-2.1l0.1-0.2l0,0c0-0.1,0-0.2,0-0.3c-0.1-0.2-0.2-0.3-0.4-0.4c-0.3-0.1-0.5,0.1-0.6,0.4l0,0l-0.1,0.2 c-0.3,0.8-0.6,1.6-0.8,2.3v0.1c-1.6,4.4-2.3,6.4-3.7,6.4l0,0l0,0l0,0c-1.8,0-3.5-7.3-4.1-10.1c-0.1-0.5-0.2-0.9-0.3-1.3h11.7 c0.2,0,0.4-0.1,0.6-0.1l0,0c0,0,0,0,0.1,0c0,0,0,0,0.1,0l0,0c0,0,0.1,0,0.1-0.1l0,0C42.5,24.6,42.7,24.3,42.7,23.9z'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleBooksIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 478.633 540.068'>
|
||||
@@ -1938,13 +1949,11 @@ export function ElevenLabsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
|
||||
export function LinkupIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'>
|
||||
<g transform='translate(12, 12) scale(1.3) translate(-12, -12)'>
|
||||
<path
|
||||
d='M20.2 14.1c-.4-.3-1.6-.4-2.9-.2.5-1.4 1.3-3.9.1-5-.6-.5-1.5-.7-2.6-.5-.3 0-.6.1-1 .2-1.1-1.6-2.4-2.5-3.8-2.5-1.6 0-3.1 1-4.1 2.9-1.2 2.1-1.9 5.1-1.9 8.8v.03l.4.3c3-.9 7.5-2.3 10.7-2.9 0 .9.1 1.9.1 2.8v.03l.4.3c.1 0 5.4-1.7 5.3-3.3 0-.2-.1-.5-.3-.7zM19.9 14.7c.03.4-1.7 1.4-4 2.3.5-.7 1-1.6 1.3-2.5 1.4-.1 2.4-.1 2.7.2zM16.4 14.6c-.3.7-.7 1.4-1.2 2-.02-.6-.1-1.2-.2-1.8.4-.1.9-.1 1.4-.2zM16.5 9.4c.8.7.9 2.4.1 5.1-.5.1-1 .1-1.5.2-.3-2-.9-3.8-1.7-5.3.3-.1.6-.2.8-.2.9-.1 1.7.05 2.3.2zM9.5 6.8c1.2 0 2.3.7 3.2 2.1-2.8 1.1-5.9 3.4-8.4 7.8.2-5.1 1.9-9.9 5.2-9.9zM4.7 17c3.4-4.9 6.4-6.8 8.4-7.8.7 1.3 1.2 2.9 1.5 4.8-3.2.6-7.3 1.8-9.9 3z'
|
||||
fill='#000000'
|
||||
/>
|
||||
</g>
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 154 107' fill='none'>
|
||||
<path
|
||||
d='M150.677 72.7113C146.612 70.2493 137.909 69.542 124.794 70.6076C128.992 57.6776 133.757 35.3911 121.323 25.1527C115.886 20.6743 107.471 19.0437 97.6162 20.5594C94.6758 21.0142 91.5752 21.7445 88.3878 22.732C78.8667 8.28165 66.2954 0 53.8613 0C39.4288 0 26.1304 9.3381 16.4081 26.2872C5.67515 45.014 0 71.9626 0 104.23V104.533L3.60356 106.94L3.88251 106.825C30.5754 95.5628 67.5759 85.0718 100.593 79.4037C101.604 87.644 102.116 95.9945 102.116 104.235V104.52L105.491 107L105.761 106.913C106.255 106.752 155.159 90.8822 153.979 77.5894C153.856 76.2022 153.183 74.2271 150.677 72.7113ZM148.409 78.09C148.715 81.5442 133.236 91.0568 111.838 98.8883C115.968 92.0995 119.818 84.1715 122.777 76.3584C135.659 75.1411 144.531 75.5545 147.792 77.5296C148.377 77.8833 148.409 78.09 148.409 78.09ZM116.668 77.0106C114.084 83.3769 110.951 89.6329 107.54 95.2458C107.334 89.5135 106.913 83.8821 106.296 78.4621C109.922 77.8971 113.407 77.4102 116.668 77.0106ZM117.774 29.4979C125.379 35.7585 125.782 51.3205 118.867 71.1772C114.747 71.6319 110.284 72.2382 105.596 72.9777C103.049 55.1742 98.2839 39.966 91.4243 27.7525C94.566 26.8155 96.9669 26.3469 98.4622 26.1127C106.721 24.8404 113.581 26.0438 117.774 29.4979ZM53.8567 5.62215C65.0561 5.62215 74.8882 12.0022 83.0922 24.5923C57.7027 34.5413 30.3193 59.4092 5.78032 94.8003C7.43119 51.4813 23.0299 5.62215 53.8613 5.62215M10.1933 98.2406C40.7504 53.9341 68.2024 36.4429 86.0739 29.5852C92.4487 41.2383 97.2046 56.5522 99.8433 73.9331C70.5209 79.0316 35.6377 88.4983 10.1933 98.2406Z'
|
||||
fill='#000000'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
@@ -2453,6 +2462,17 @@ export function OutlookIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function PagerDutyIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64' fill='none'>
|
||||
<path
|
||||
d='M6.704 59.217H0v-33.65c0-3.455 1.418-5.544 2.604-6.704 2.63-2.58 6.2-2.656 6.782-2.656h10.546c3.765 0 5.93 1.52 7.117 2.8 2.346 2.553 2.372 5.853 2.32 6.73v12.687c0 3.662-1.496 5.828-2.733 6.988-2.553 2.398-5.93 2.45-6.73 2.424H6.704zm13.46-18.102c.36 0 1.367-.103 1.908-.62.413-.387.62-1.083.62-2.1v-13.02c0-.36-.077-1.315-.593-1.857-.5-.516-1.444-.62-2.166-.62h-10.6c-2.63 0-2.63 1.985-2.63 2.656v15.55zM57.296 4.783H64V38.46c0 3.455-1.418 5.544-2.604 6.704-2.63 2.58-6.2 2.656-6.782 2.656H44.068c-3.765 0-5.93-1.52-7.117-2.8-2.346-2.553-2.372-5.853-2.32-6.73V25.62c0-3.662 1.496-5.828 2.733-6.988 2.553-2.398 5.93-2.45 6.73-2.424h13.202zM43.836 22.9c-.36 0-1.367.103-1.908.62-.413.387-.62 1.083-.62 2.1v13.02c0 .36.077 1.315.593 1.857.5.516 1.444.62 2.166.62h10.598c2.656-.026 2.656-2 2.656-2.682V22.9z'
|
||||
fill='#06AC38'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function MicrosoftExcelIcon(props: SVGProps<SVGSVGElement>) {
|
||||
const id = useId()
|
||||
const gradientId = `excel_gradient_${id}`
|
||||
@@ -3996,10 +4016,10 @@ export function IntercomIcon(props: SVGProps<SVGSVGElement>) {
|
||||
|
||||
export function LoopsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 256 256' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<svg {...props} viewBox='0 0 214 186' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
fill='currentColor'
|
||||
d='M192.352 88.042c0-7.012-5.685-12.697-12.697-12.697s-12.697 5.685-12.697 12.697c0 .634.052 1.255.142 1.866a25.248 25.248 0 0 0-4.9-.49c-14.006 0-25.36 11.354-25.36 25.36 0 1.63.16 3.222.456 4.765a37.8 37.8 0 0 0-9.296-1.173c-20.95 0-37.935 16.985-37.935 37.935S107.05 194.24 128 194.24s37.935-16.985 37.935-37.935a37.7 37.7 0 0 0-3.78-16.555 25.2 25.2 0 0 0 12.487-3.336 25.2 25.2 0 0 0 4.558 3.336v.02c14.006 0 25.36-11.354 25.36-25.36 0-12.48-9.018-22.855-20.888-24.996a12.6 12.6 0 0 0 8.68-11.972m-77.05 68.263c0-7.012 5.685-12.697 12.697-12.697s12.697 5.685 12.697 12.697c0 7.013-5.685 12.697-12.697 12.697s-12.697-5.685-12.697-12.697'
|
||||
d='M122.19,0 H90.27 C40.51,0 0,39.88 0,92.95 C0,141.07 38.93,183.77 90.27,183.77 H122.19 C172.61,183.77 213.31,142.82 213.31,92.95 C213.31,43.29 173.09,0 122.19,0 Z M10.82,92.54 C10.82,50.19 45.91,11.49 91.96,11.49 C138.73,11.49 172.69,50.33 172.69,92.13 C172.69,117.76 154.06,139.09 129.02,143.31 C145.16,131.15 155.48,112.73 155.48,92.4 C155.48,59.09 127.44,28.82 92.37,28.82 C57.23,28.82 28.51,57.23 28.51,92.91 C28.51,122.63 43.61,151.08 69.99,168.21 L71.74,169.33 C35.99,161.39 10.82,130.11 10.82,92.54 Z M106.33,42.76 C128.88,50.19 143.91,68.92 143.91,92.26 C143.91,114.23 128.68,134.63 106.12,141.71 C105.44,141.96 105.17,141.96 105.17,141.96 C83.91,135.76 69.29,116.38 69.29,92.71 C69.29,69.91 83.71,50.33 106.33,42.76 Z M120.91,172.13 C76.11,172.13 40.09,137.21 40.09,93.32 C40.09,67.03 57.17,46.11 83.98,41.33 C67.04,53.83 57.3,71.71 57.3,92.71 C57.3,125.75 82.94,155.33 120.77,155.33 C155.01,155.33 184.31,125.2 184.31,92.47 C184.31,62.34 169.96,34.06 141.92,14.55 L141.65,14.34 C175.81,23.68 202.26,54.11 202.26,92.81 C202.26,135.69 166.38,172.13 120.91,172.13 Z'
|
||||
fill='#FB5001'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
@@ -4542,7 +4562,7 @@ export function DatabricksIcon(props: SVGProps<SVGSVGElement>) {
|
||||
<svg {...props} viewBox='0 0 241 266' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M228.085 109.654L120.615 171.674L5.53493 105.41L0 108.475V156.582L120.615 225.911L228.085 164.128V189.596L120.615 251.615L5.53493 185.351L0 188.417V196.67L120.615 266L241 196.67V148.564L235.465 145.498L120.615 211.527L12.9148 149.743V124.275L120.615 186.059L241 116.729V69.3298L235.004 65.7925L120.615 131.585L18.4498 73.1028L120.615 14.3848L204.562 62.7269L211.942 58.4823V52.5869L120.615 0L0 69.3298V76.8759L120.615 146.206L228.085 84.1862V109.654Z'
|
||||
fill='#F9F7F4'
|
||||
fill='#FF3621'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
@@ -5578,6 +5598,35 @@ export function GoogleMapsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function GooglePagespeedIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='-1.74 -1.81 285.55 266.85' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M272.73 37.23v179.68a18.58 18.58 0 0 1-18.57 18.59H18.65A18.58 18.58 0 0 1 .06 216.94V37.23z'
|
||||
fill='#e1e1e1'
|
||||
/>
|
||||
<path
|
||||
d='M18.65 0h235.5a18.58 18.58 0 0 1 18.58 18.56v18.67H.07V18.59A18.58 18.58 0 0 1 18.64 0z'
|
||||
fill='#c2c2c2'
|
||||
/>
|
||||
<path
|
||||
d='M136.3 92.96a99 99 0 0 0-99 99v.13c0 2.08-.12 4.64 0 6.2h43.25a54.87 54.87 0 0 1 0-6.2 55.81 55.81 0 0 1 85.06-47.45l31.12-31.12a98.76 98.76 0 0 0-60.44-20.57z'
|
||||
fill='#4285f4'
|
||||
/>
|
||||
<path
|
||||
d='M196.73 113.46l-31.14 31.14a55.74 55.74 0 0 1 26.56 47.45 54.87 54.87 0 0 1 0 6.2h43.39c.12-1.48 0-4.12 0-6.2a99 99 0 0 0-38.81-78.59z'
|
||||
fill='#f44336'
|
||||
/>
|
||||
<circle cx='24.85' cy='18.59' fill='#eee' r='6.2' />
|
||||
<circle cx='49.65' cy='18.59' fill='#eee' r='6.2' />
|
||||
<path
|
||||
d='M197.01 117.23a3.05 3.05 0 0 0 .59-1.81 3.11 3.11 0 0 0-3.1-3.1 3 3 0 0 0-1.91.68l-67.56 52a18.58 18.58 0 1 0 27.24 24.33l44.73-72.1z'
|
||||
fill='#9e9e9e'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleTranslateIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 998.1 998.3'>
|
||||
|
||||
@@ -1,12 +1,17 @@
|
||||
'use client'
|
||||
|
||||
import Link from 'next/link'
|
||||
import { usePathname } from 'next/navigation'
|
||||
import { LanguageDropdown } from '@/components/ui/language-dropdown'
|
||||
import { SearchTrigger } from '@/components/ui/search-trigger'
|
||||
import { SimLogoFull } from '@/components/ui/sim-logo'
|
||||
import { ThemeToggle } from '@/components/ui/theme-toggle'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
export function Navbar() {
|
||||
const pathname = usePathname()
|
||||
const isApiReference = pathname.includes('/api-reference')
|
||||
|
||||
return (
|
||||
<nav className='sticky top-0 z-50 border-border/50 border-b bg-background/80 backdrop-blur-md backdrop-saturate-150'>
|
||||
{/* Desktop: Single row layout */}
|
||||
@@ -31,16 +36,30 @@ export function Navbar() {
|
||||
</div>
|
||||
|
||||
{/* Right cluster aligns with TOC edge */}
|
||||
<div className='flex items-center gap-4'>
|
||||
<div className='flex items-center gap-1'>
|
||||
<Link
|
||||
href='/introduction'
|
||||
className={cn(
|
||||
'rounded-xl px-3 py-2 font-normal text-[0.9375rem] leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground',
|
||||
!isApiReference ? 'text-foreground' : 'text-foreground/60'
|
||||
)}
|
||||
>
|
||||
Documentation
|
||||
</Link>
|
||||
<Link
|
||||
href='/api-reference/getting-started'
|
||||
className={cn(
|
||||
'rounded-xl px-3 py-2 font-normal text-[0.9375rem] leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground',
|
||||
isApiReference ? 'text-foreground' : 'text-foreground/60'
|
||||
)}
|
||||
>
|
||||
API
|
||||
</Link>
|
||||
<Link
|
||||
href='https://sim.ai'
|
||||
target='_blank'
|
||||
rel='noopener noreferrer'
|
||||
className='rounded-xl px-3 py-2 font-normal text-[0.9375rem] text-foreground/60 leading-[1.4] transition-colors hover:bg-foreground/8 hover:text-foreground'
|
||||
style={{
|
||||
fontFamily:
|
||||
'-apple-system, BlinkMacSystemFont, "Segoe UI", Roboto, "Helvetica Neue", Arial, sans-serif',
|
||||
}}
|
||||
>
|
||||
Platform
|
||||
</Link>
|
||||
|
||||
@@ -25,8 +25,8 @@ export function StructuredData({
|
||||
headline: title,
|
||||
description: description,
|
||||
url: url,
|
||||
datePublished: dateModified || new Date().toISOString(),
|
||||
dateModified: dateModified || new Date().toISOString(),
|
||||
...(dateModified && { datePublished: dateModified }),
|
||||
...(dateModified && { dateModified }),
|
||||
author: {
|
||||
'@type': 'Organization',
|
||||
name: 'Sim Team',
|
||||
@@ -91,12 +91,6 @@ export function StructuredData({
|
||||
inLanguage: ['en', 'es', 'fr', 'de', 'ja', 'zh'],
|
||||
}
|
||||
|
||||
const faqStructuredData = title.toLowerCase().includes('faq') && {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'FAQPage',
|
||||
mainEntity: [],
|
||||
}
|
||||
|
||||
const softwareStructuredData = {
|
||||
'@context': 'https://schema.org',
|
||||
'@type': 'SoftwareApplication',
|
||||
@@ -151,15 +145,6 @@ export function StructuredData({
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{faqStructuredData && (
|
||||
<Script
|
||||
id='faq-structured-data'
|
||||
type='application/ld+json'
|
||||
dangerouslySetInnerHTML={{
|
||||
__html: JSON.stringify(faqStructuredData),
|
||||
}}
|
||||
/>
|
||||
)}
|
||||
{url === baseUrl && (
|
||||
<Script
|
||||
id='software-structured-data'
|
||||
|
||||
@@ -9,6 +9,7 @@ import {
|
||||
AirtableIcon,
|
||||
AirweaveIcon,
|
||||
AlgoliaIcon,
|
||||
AmplitudeIcon,
|
||||
ApifyIcon,
|
||||
ApolloIcon,
|
||||
ArxivIcon,
|
||||
@@ -56,6 +57,7 @@ import {
|
||||
GoogleGroupsIcon,
|
||||
GoogleIcon,
|
||||
GoogleMapsIcon,
|
||||
GooglePagespeedIcon,
|
||||
GoogleSheetsIcon,
|
||||
GoogleSlidesIcon,
|
||||
GoogleTasksIcon,
|
||||
@@ -102,6 +104,7 @@ import {
|
||||
OpenAIIcon,
|
||||
OutlookIcon,
|
||||
PackageSearchIcon,
|
||||
PagerDutyIcon,
|
||||
ParallelIcon,
|
||||
PerplexityIcon,
|
||||
PineconeIcon,
|
||||
@@ -167,6 +170,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
airtable: AirtableIcon,
|
||||
airweave: AirweaveIcon,
|
||||
algolia: AlgoliaIcon,
|
||||
amplitude: AmplitudeIcon,
|
||||
apify: ApifyIcon,
|
||||
apollo: ApolloIcon,
|
||||
arxiv: ArxivIcon,
|
||||
@@ -211,6 +215,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
google_forms: GoogleFormsIcon,
|
||||
google_groups: GoogleGroupsIcon,
|
||||
google_maps: GoogleMapsIcon,
|
||||
google_pagespeed: GooglePagespeedIcon,
|
||||
google_search: GoogleIcon,
|
||||
google_sheets_v2: GoogleSheetsIcon,
|
||||
google_slides_v2: GoogleSlidesIcon,
|
||||
@@ -258,6 +263,7 @@ export const blockTypeToIconMap: Record<string, IconComponent> = {
|
||||
onepassword: OnePasswordIcon,
|
||||
openai: OpenAIIcon,
|
||||
outlook: OutlookIcon,
|
||||
pagerduty: PagerDutyIcon,
|
||||
parallel_ai: ParallelIcon,
|
||||
perplexity: PerplexityIcon,
|
||||
pinecone: PineconeIcon,
|
||||
|
||||
169
apps/docs/components/ui/response-section.tsx
Normal file
169
apps/docs/components/ui/response-section.tsx
Normal file
@@ -0,0 +1,169 @@
|
||||
'use client'
|
||||
|
||||
import { useEffect, useRef, useState } from 'react'
|
||||
import { ChevronDown } from 'lucide-react'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
interface ResponseSectionProps {
|
||||
children: React.ReactNode
|
||||
}
|
||||
|
||||
export function ResponseSection({ children }: ResponseSectionProps) {
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
const [statusCodes, setStatusCodes] = useState<string[]>([])
|
||||
const [selectedCode, setSelectedCode] = useState<string>('')
|
||||
const [isOpen, setIsOpen] = useState(false)
|
||||
const dropdownRef = useRef<HTMLDivElement>(null)
|
||||
|
||||
function getAccordionItems() {
|
||||
const root = containerRef.current?.querySelector('[data-orientation="vertical"]')
|
||||
if (!root) return []
|
||||
return Array.from(root.children).filter(
|
||||
(el) => el.getAttribute('data-state') !== null
|
||||
) as HTMLElement[]
|
||||
}
|
||||
|
||||
function showStatusCode(code: string) {
|
||||
const items = getAccordionItems()
|
||||
for (const item of items) {
|
||||
const triggerBtn = item.querySelector('h3 button') as HTMLButtonElement | null
|
||||
const text = triggerBtn?.textContent?.trim() ?? ''
|
||||
const itemCode = text.match(/^\d{3}/)?.[0]
|
||||
|
||||
if (itemCode === code) {
|
||||
item.style.display = ''
|
||||
if (item.getAttribute('data-state') === 'closed' && triggerBtn) {
|
||||
triggerBtn.click()
|
||||
}
|
||||
} else {
|
||||
item.style.display = 'none'
|
||||
if (item.getAttribute('data-state') === 'open' && triggerBtn) {
|
||||
triggerBtn.click()
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Detect when the fumadocs accordion children mount via MutationObserver,
|
||||
* then extract status codes and show the first one.
|
||||
* Replaces the previous approach that used `children` as a dependency
|
||||
* (which triggered on every render since children is a new object each time).
|
||||
*/
|
||||
useEffect(() => {
|
||||
const container = containerRef.current
|
||||
if (!container) return
|
||||
|
||||
const initialize = () => {
|
||||
const items = getAccordionItems()
|
||||
if (items.length === 0) return false
|
||||
|
||||
const codes: string[] = []
|
||||
const seen = new Set<string>()
|
||||
|
||||
for (const item of items) {
|
||||
const triggerBtn = item.querySelector('h3 button')
|
||||
if (triggerBtn) {
|
||||
const text = triggerBtn.textContent?.trim() ?? ''
|
||||
const code = text.match(/^\d{3}/)?.[0]
|
||||
if (code && !seen.has(code)) {
|
||||
seen.add(code)
|
||||
codes.push(code)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if (codes.length > 0) {
|
||||
setStatusCodes(codes)
|
||||
setSelectedCode(codes[0])
|
||||
showStatusCode(codes[0])
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
if (initialize()) return
|
||||
|
||||
const observer = new MutationObserver(() => {
|
||||
if (initialize()) {
|
||||
observer.disconnect()
|
||||
}
|
||||
})
|
||||
observer.observe(container, { childList: true, subtree: true })
|
||||
|
||||
return () => observer.disconnect()
|
||||
}, []) // eslint-disable-line react-hooks/exhaustive-deps
|
||||
|
||||
function handleSelectCode(code: string) {
|
||||
setSelectedCode(code)
|
||||
setIsOpen(false)
|
||||
showStatusCode(code)
|
||||
}
|
||||
|
||||
useEffect(() => {
|
||||
function handleClickOutside(event: MouseEvent) {
|
||||
if (dropdownRef.current && !dropdownRef.current.contains(event.target as Node)) {
|
||||
setIsOpen(false)
|
||||
}
|
||||
}
|
||||
document.addEventListener('mousedown', handleClickOutside)
|
||||
return () => document.removeEventListener('mousedown', handleClickOutside)
|
||||
}, [])
|
||||
|
||||
return (
|
||||
<div ref={containerRef} className='response-section-wrapper'>
|
||||
{statusCodes.length > 0 && (
|
||||
<div className='response-section-header'>
|
||||
<h2 className='response-section-title'>Response</h2>
|
||||
<div className='response-section-meta'>
|
||||
<div ref={dropdownRef} className='response-section-dropdown-wrapper'>
|
||||
<button
|
||||
type='button'
|
||||
className='response-section-dropdown-trigger'
|
||||
onClick={() => setIsOpen(!isOpen)}
|
||||
>
|
||||
<span>{selectedCode}</span>
|
||||
<ChevronDown
|
||||
className={cn(
|
||||
'response-section-chevron',
|
||||
isOpen && 'response-section-chevron-open'
|
||||
)}
|
||||
/>
|
||||
</button>
|
||||
{isOpen && (
|
||||
<div className='response-section-dropdown-menu'>
|
||||
{statusCodes.map((code) => (
|
||||
<button
|
||||
key={code}
|
||||
type='button'
|
||||
className={cn(
|
||||
'response-section-dropdown-item',
|
||||
code === selectedCode && 'response-section-dropdown-item-selected'
|
||||
)}
|
||||
onClick={() => handleSelectCode(code)}
|
||||
>
|
||||
<span>{code}</span>
|
||||
{code === selectedCode && (
|
||||
<svg
|
||||
className='response-section-check'
|
||||
viewBox='0 0 24 24'
|
||||
fill='none'
|
||||
stroke='currentColor'
|
||||
strokeWidth='2'
|
||||
>
|
||||
<polyline points='20 6 9 17 4 12' />
|
||||
</svg>
|
||||
)}
|
||||
</button>
|
||||
))}
|
||||
</div>
|
||||
)}
|
||||
</div>
|
||||
<span className='response-section-content-type'>application/json</span>
|
||||
</div>
|
||||
</div>
|
||||
)}
|
||||
<div className='response-section-content'>{children}</div>
|
||||
</div>
|
||||
)
|
||||
}
|
||||
94
apps/docs/content/docs/de/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/de/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim dashboard and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/de/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/de/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim dashboard and navigate to **Settings → Sim Keys**, then click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/de/api-reference/meta.json
Normal file
16
apps/docs/content/docs/de/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
766
apps/docs/content/docs/de/api-reference/python.mdx
Normal file
766
apps/docs/content/docs/de/api-reference/python.mdx
Normal file
@@ -0,0 +1,766 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Das offizielle Python SDK für Sim ermöglicht es Ihnen, Workflows programmatisch aus Ihren Python-Anwendungen heraus mit dem offiziellen Python SDK auszuführen.
|
||||
|
||||
<Callout type="info">
|
||||
Das Python SDK unterstützt Python 3.8+ mit Unterstützung für asynchrone Ausführung, automatischer Ratenbegrenzung mit exponentiellem Backoff und Nutzungsverfolgung.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
|
||||
Installieren Sie das SDK mit pip:
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## Schnellstart
|
||||
|
||||
Hier ist ein einfaches Beispiel für den Einstieg:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## API-Referenz
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### Konstruktor
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `api_key` (str): Ihr Sim API-Schlüssel
|
||||
- `base_url` (str, optional): Basis-URL für die Sim API
|
||||
|
||||
#### Methoden
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
Führt einen Workflow mit optionalen Eingabedaten aus.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflow_id` (str): Die ID des auszuführenden Workflows
|
||||
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (float, optional): Timeout in Sekunden (Standard: 30.0)
|
||||
- `stream` (bool, optional): Streaming-Antworten aktivieren (Standard: False)
|
||||
- `selected_outputs` (list[str], optional): Block-Ausgaben zum Streamen im Format `blockName.attribute` (z. B. `["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): Asynchron ausführen (Standard: False)
|
||||
|
||||
**Rückgabewert:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Wenn `async_execution=True`, wird sofort mit einer Task-ID zum Polling zurückgegeben. Andernfalls wird auf die Fertigstellung gewartet.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Ruft den Status eines Workflows ab (Deployment-Status usw.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflow_id` (str): Die ID des Workflows
|
||||
|
||||
**Rückgabe:** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Überprüft, ob ein Workflow zur Ausführung bereit ist.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflow_id` (str): Die ID des Workflows
|
||||
|
||||
**Rückgabe:** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
Ruft den Status einer asynchronen Job-Ausführung ab.
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `task_id` (str): Die Task-ID, die von der asynchronen Ausführung zurückgegeben wurde
|
||||
|
||||
**Rückgabe:** `Dict[str, Any]`
|
||||
|
||||
**Antwortfelder:**
|
||||
- `success` (bool): Ob die Anfrage erfolgreich war
|
||||
- `taskId` (str): Die Task-ID
|
||||
- `status` (str): Einer von `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): Enthält `startedAt`, `completedAt` und `duration`
|
||||
- `output` (any, optional): Die Workflow-Ausgabe (wenn abgeschlossen)
|
||||
- `error` (any, optional): Fehlerdetails (wenn fehlgeschlagen)
|
||||
- `estimatedDuration` (int, optional): Geschätzte Dauer in Millisekunden (wenn in Bearbeitung/in Warteschlange)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Führt einen Workflow mit automatischer Wiederholung bei Rate-Limit-Fehlern unter Verwendung von exponentiellem Backoff aus.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parameter:**
|
||||
- `workflow_id` (str): Die ID des auszuführenden Workflows
|
||||
- `input_data` (dict, optional): Eingabedaten, die an den Workflow übergeben werden
|
||||
- `timeout` (float, optional): Timeout in Sekunden
|
||||
- `stream` (bool, optional): Streaming-Antworten aktivieren
|
||||
- `selected_outputs` (list, optional): Block-Ausgaben zum Streamen
|
||||
- `async_execution` (bool, optional): Asynchron ausführen
|
||||
- `max_retries` (int, optional): Maximale Anzahl von Wiederholungen (Standard: 3)
|
||||
- `initial_delay` (float, optional): Anfangsverzögerung in Sekunden (Standard: 1.0)
|
||||
- `max_delay` (float, optional): Maximale Verzögerung in Sekunden (Standard: 30.0)
|
||||
- `backoff_multiplier` (float, optional): Backoff-Multiplikator (Standard: 2.0)
|
||||
|
||||
**Rückgabe:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Die Wiederholungslogik verwendet exponentielles Backoff (1s → 2s → 4s → 8s...) mit ±25% Jitter, um Thundering Herd zu verhindern. Wenn die API einen `retry-after`-Header bereitstellt, wird dieser stattdessen verwendet.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Ruft die aktuellen Rate-Limit-Informationen aus der letzten API-Antwort ab.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Rückgabewert:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Ruft aktuelle Nutzungslimits und Kontingentinformationen für Ihr Konto ab.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Rückgabewert:** `UsageLimits`
|
||||
|
||||
**Antwortstruktur:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
Aktualisiert den API-Schlüssel.
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
Aktualisiert die Basis-URL.
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
Schließt die zugrunde liegende HTTP-Sitzung.
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## Datenklassen
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Häufige Fehlercodes:**
|
||||
- `UNAUTHORIZED`: Ungültiger API-Schlüssel
|
||||
- `TIMEOUT`: Zeitüberschreitung der Anfrage
|
||||
- `RATE_LIMIT_EXCEEDED`: Ratenlimit überschritten
|
||||
- `USAGE_LIMIT_EXCEEDED`: Nutzungslimit überschritten
|
||||
- `EXECUTION_ERROR`: Workflow-Ausführung fehlgeschlagen
|
||||
|
||||
## Beispiele
|
||||
|
||||
### Grundlegende Workflow-Ausführung
|
||||
|
||||
<Steps>
|
||||
<Step title="Client initialisieren">
|
||||
Richten Sie den SimStudioClient mit Ihrem API-Schlüssel ein.
|
||||
</Step>
|
||||
<Step title="Workflow validieren">
|
||||
Prüfen Sie, ob der Workflow bereitgestellt und zur Ausführung bereit ist.
|
||||
</Step>
|
||||
<Step title="Workflow ausführen">
|
||||
Führen Sie den Workflow mit Ihren Eingabedaten aus.
|
||||
</Step>
|
||||
<Step title="Ergebnis verarbeiten">
|
||||
Verarbeiten Sie das Ausführungsergebnis und behandeln Sie eventuelle Fehler.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### Fehlerbehandlung
|
||||
|
||||
Behandeln Sie verschiedene Fehlertypen, die während der Workflow-Ausführung auftreten können:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### Verwendung des Context-Managers
|
||||
|
||||
Verwenden Sie den Client als Context-Manager, um die Ressourcenbereinigung automatisch zu handhaben:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Batch-Workflow-Ausführung
|
||||
|
||||
Führen Sie mehrere Workflows effizient aus:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Asynchrone Workflow-Ausführung
|
||||
|
||||
Führen Sie Workflows asynchron für langwierige Aufgaben aus:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Ratenlimitierung und Wiederholung
|
||||
|
||||
Behandeln Sie Ratenbegrenzungen automatisch mit exponentiellem Backoff:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Nutzungsüberwachung
|
||||
|
||||
Überwachen Sie die Nutzung und Limits Ihres Kontos:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Streaming-Workflow-Ausführung
|
||||
|
||||
Führen Sie Workflows mit Echtzeit-Streaming-Antworten aus:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
Die Streaming-Antwort folgt dem Server-Sent-Events- (SSE-) Format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask-Streaming-Beispiel:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Umgebungskonfiguration
|
||||
|
||||
Konfigurieren Sie den Client mit Umgebungsvariablen:
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
    api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab value="Production">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Ihren API-Schlüssel erhalten
|
||||
|
||||
<Steps>
|
||||
<Step title="Bei Sim anmelden">
|
||||
Navigieren Sie zu [Sim](https://sim.ai) und melden Sie sich in Ihrem Konto an.
|
||||
</Step>
|
||||
<Step title="Workflow öffnen">
|
||||
Navigieren Sie zu dem Workflow, den Sie programmatisch ausführen möchten.
|
||||
</Step>
|
||||
<Step title="Workflow bereitstellen">
|
||||
Klicken Sie auf "Bereitstellen", um Ihren Workflow bereitzustellen, falls dies noch nicht geschehen ist.
|
||||
</Step>
|
||||
<Step title="API-Schlüssel erstellen oder auswählen">
|
||||
Wählen oder erstellen Sie während des Bereitstellungsprozesses einen API-Schlüssel.
|
||||
</Step>
|
||||
<Step title="API-Schlüssel kopieren">
|
||||
Kopieren Sie den API-Schlüssel, um ihn in Ihrer Python-Anwendung zu verwenden.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Voraussetzungen
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## Lizenz
|
||||
|
||||
Apache-2.0
|
||||
1052
apps/docs/content/docs/de/api-reference/typescript.mdx
Normal file
1052
apps/docs/content/docs/de/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
24
apps/docs/content/docs/de/meta.json
Normal file
24
apps/docs/content/docs/de/meta.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"title": "Sim Documentation",
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
],
|
||||
"defaultOpen": false
|
||||
}
|
||||
@@ -0,0 +1,3 @@
|
||||
{
|
||||
"pages": ["executeWorkflow", "cancelExecution", "listWorkflows", "getWorkflow", "getJobStatus"]
|
||||
}
|
||||
94
apps/docs/content/docs/en/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/en/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim dashboard and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
    import os

    import requests
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/en/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/en/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar, or use the dashboard to manage deployments.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/en/api-reference/meta.json
Normal file
16
apps/docs/content/docs/en/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
761
apps/docs/content/docs/en/api-reference/python.mdx
Normal file
761
apps/docs/content/docs/en/api-reference/python.mdx
Normal file
@@ -0,0 +1,761 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
The official Python SDK for Sim allows you to execute workflows programmatically from your Python applications.
|
||||
|
||||
<Callout type="info">
|
||||
The Python SDK supports Python 3.8+ with async execution support, automatic rate limiting with exponential backoff, and usage tracking.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
|
||||
Install the SDK using pip:
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
Here's a simple example to get you started:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## API Reference
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### Constructor
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `api_key` (str): Your Sim API key
|
||||
- `base_url` (str, optional): Base URL for the Sim API
|
||||
|
||||
#### Methods
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
Execute a workflow with optional input data.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflow_id` (str): The ID of the workflow to execute
|
||||
- `input_data` (dict, optional): Input data to pass to the workflow
|
||||
- `timeout` (float, optional): Timeout in seconds (default: 30.0)
|
||||
- `stream` (bool, optional): Enable streaming responses (default: False)
|
||||
- `selected_outputs` (list[str], optional): Block outputs to stream in `blockName.attribute` format (e.g., `["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): Execute asynchronously (default: False)
|
||||
|
||||
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
When `async_execution=True`, returns immediately with a task ID for polling. Otherwise, waits for completion.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Get the status of a workflow (deployment status, etc.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflow_id` (str): The ID of the workflow
|
||||
|
||||
**Returns:** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Validate that a workflow is ready for execution.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflow_id` (str): The ID of the workflow
|
||||
|
||||
**Returns:** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
Get the status of an async job execution.
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `task_id` (str): The task ID returned from async execution
|
||||
|
||||
**Returns:** `Dict[str, Any]`
|
||||
|
||||
**Response fields:**
|
||||
- `success` (bool): Whether the request was successful
|
||||
- `taskId` (str): The task ID
|
||||
- `status` (str): One of `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): Contains `startedAt`, `completedAt`, and `duration`
|
||||
- `output` (any, optional): The workflow output (when completed)
|
||||
- `error` (any, optional): Error details (when failed)
|
||||
- `estimatedDuration` (int, optional): Estimated duration in milliseconds (when processing/queued)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Execute a workflow with automatic retry on rate limit errors using exponential backoff.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parameters:**
|
||||
- `workflow_id` (str): The ID of the workflow to execute
|
||||
- `input_data` (dict, optional): Input data to pass to the workflow
|
||||
- `timeout` (float, optional): Timeout in seconds
|
||||
- `stream` (bool, optional): Enable streaming responses
|
||||
- `selected_outputs` (list, optional): Block outputs to stream
|
||||
- `async_execution` (bool, optional): Execute asynchronously
|
||||
- `max_retries` (int, optional): Maximum number of retries (default: 3)
|
||||
- `initial_delay` (float, optional): Initial delay in seconds (default: 1.0)
|
||||
- `max_delay` (float, optional): Maximum delay in seconds (default: 30.0)
|
||||
- `backoff_multiplier` (float, optional): Backoff multiplier (default: 2.0)
|
||||
|
||||
**Returns:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
The retry logic uses exponential backoff (1s → 2s → 4s → 8s...) with ±25% jitter to prevent thundering herd. If the API provides a `retry-after` header, it will be used instead.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Get the current rate limit information from the last API response.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Returns:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Get current usage limits and quota information for your account.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Returns:** `UsageLimits`
|
||||
|
||||
**Response structure:**
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
Update the API key.
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
Update the base URL.
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
Close the underlying HTTP session.
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## Data Classes
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Common error codes:**
|
||||
- `UNAUTHORIZED`: Invalid API key
|
||||
- `TIMEOUT`: Request timed out
|
||||
- `RATE_LIMIT_EXCEEDED`: Rate limit exceeded
|
||||
- `USAGE_LIMIT_EXCEEDED`: Usage limit exceeded
|
||||
- `EXECUTION_ERROR`: Workflow execution failed
|
||||
|
||||
## Examples
|
||||
|
||||
### Basic Workflow Execution
|
||||
|
||||
<Steps>
|
||||
<Step title="Initialize the client">
|
||||
Set up the SimStudioClient with your API key.
|
||||
</Step>
|
||||
<Step title="Validate the workflow">
|
||||
Check if the workflow is deployed and ready for execution.
|
||||
</Step>
|
||||
<Step title="Execute the workflow">
|
||||
Run the workflow with your input data.
|
||||
</Step>
|
||||
<Step title="Handle the result">
|
||||
Process the execution result and handle any errors.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### Error Handling
|
||||
|
||||
Handle different types of errors that may occur during workflow execution:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### Context Manager Usage
|
||||
|
||||
Use the client as a context manager to automatically handle resource cleanup:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Batch Workflow Execution
|
||||
|
||||
Execute multiple workflows efficiently:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Async Workflow Execution
|
||||
|
||||
Execute workflows asynchronously for long-running tasks:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Rate Limiting and Retry
|
||||
|
||||
Handle rate limits automatically with exponential backoff:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Usage Monitoring
|
||||
|
||||
Monitor your account usage and limits:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Streaming Workflow Execution
|
||||
|
||||
Execute workflows with real-time streaming responses:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
The streaming response follows the Server-Sent Events (SSE) format:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask Streaming Example:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Environment Configuration
|
||||
|
||||
Configure the client using environment variables:
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
    api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Production">
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Getting Your API Key
|
||||
|
||||
<Steps>
|
||||
<Step title="Log in to Sim">
|
||||
Navigate to [Sim](https://sim.ai) and log in to your account.
|
||||
</Step>
|
||||
<Step title="Open your workflow">
|
||||
Navigate to the workflow you want to execute programmatically.
|
||||
</Step>
|
||||
<Step title="Deploy your workflow">
|
||||
Click on "Deploy" to deploy your workflow if it hasn't been deployed yet.
|
||||
</Step>
|
||||
<Step title="Create or select an API key">
|
||||
During the deployment process, select or create an API key.
|
||||
</Step>
|
||||
<Step title="Copy the API key">
|
||||
Copy the API key to use in your Python application.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Requirements
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## License
|
||||
|
||||
Apache-2.0
|
||||
1035
apps/docs/content/docs/en/api-reference/typescript.mdx
Normal file
1035
apps/docs/content/docs/en/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
@@ -17,7 +17,7 @@ curl -H "x-api-key: YOUR_API_KEY" \
|
||||
https://sim.ai/api/v1/logs?workspaceId=YOUR_WORKSPACE_ID
|
||||
```
|
||||
|
||||
You can generate API keys from your user settings in the Sim dashboard.
|
||||
You can generate API keys from the Sim platform: navigate to **Settings**, go to **Sim Keys**, and click **Create**.
|
||||
|
||||
## Logs API
|
||||
|
||||
|
||||
@@ -16,7 +16,6 @@
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"sdks",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
|
||||
@@ -113,7 +113,7 @@ Users can create two types of environment variables:
|
||||
### Personal Environment Variables
|
||||
- Only visible to the individual user
|
||||
- Available in all workflows they run
|
||||
- Managed in user settings
|
||||
- Managed in **Settings** under **Secrets**
|
||||
|
||||
### Workspace Environment Variables
|
||||
- **Read permission**: Can see variable names and values
|
||||
|
||||
313
apps/docs/content/docs/en/tools/amplitude.mdx
Normal file
313
apps/docs/content/docs/en/tools/amplitude.mdx
Normal file
@@ -0,0 +1,313 @@
|
||||
---
|
||||
title: Amplitude
|
||||
description: Track events and query analytics from Amplitude
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="amplitude"
|
||||
color="#1B1F3B"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[Amplitude](https://amplitude.com/) is a leading digital analytics platform that helps teams understand user behavior, measure product performance, and make data-driven decisions at scale.
|
||||
|
||||
The Amplitude integration in Sim connects with the Amplitude HTTP and Dashboard REST APIs using API key and secret key authentication, allowing your agents to track events, manage user properties, and query analytics data programmatically. This API-based approach ensures secure access to Amplitude's full suite of analytics capabilities.
|
||||
|
||||
With the Amplitude integration, your agents can:
|
||||
|
||||
- **Track events**: Send custom events to Amplitude with rich properties, revenue data, and user context directly from your workflows
|
||||
- **Identify users**: Set and update user properties using operations like $set, $setOnce, $add, $append, and $unset to maintain detailed user profiles
|
||||
- **Search for users**: Look up users by User ID, Device ID, or Amplitude ID to retrieve profile information and metadata
|
||||
- **Query event analytics**: Run event segmentation queries with grouping, custom metrics (uniques, totals, averages, DAU percentages), and flexible date ranges
|
||||
- **Monitor user activity**: Retrieve event streams for specific users to understand individual user journeys and behavior patterns
|
||||
- **Analyze active users**: Get active or new user counts over time with daily, weekly, or monthly granularity
|
||||
- **Track revenue**: Access revenue LTV metrics including ARPU, ARPPU, total revenue, and paying user counts
|
||||
|
||||
In Sim, the Amplitude integration enables powerful analytics automation scenarios. Your agents can track product events in real time based on workflow triggers, enrich user profiles as new data becomes available, query segmentation data to inform downstream decisions, or build monitoring workflows that alert on changes in key metrics. By connecting Sim with Amplitude, you can build intelligent agents that bridge the gap between analytics insights and automated action, enabling data-driven workflows that respond to user behavior patterns and product performance trends.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate Amplitude into your workflow to track events, identify users and groups, search for users, query analytics, and retrieve revenue data.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `amplitude_send_event`
|
||||
|
||||
Track an event in Amplitude using the HTTP V2 API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `userId` | string | No | User ID \(required if no device_id\) |
|
||||
| `deviceId` | string | No | Device ID \(required if no user_id\) |
|
||||
| `eventType` | string | Yes | Name of the event \(e.g., "page_view", "purchase"\) |
|
||||
| `eventProperties` | string | No | JSON object of custom event properties |
|
||||
| `userProperties` | string | No | JSON object of user properties to set \(supports $set, $setOnce, $add, $append, $unset\) |
|
||||
| `time` | string | No | Event timestamp in milliseconds since epoch |
|
||||
| `sessionId` | string | No | Session start time in milliseconds since epoch |
|
||||
| `insertId` | string | No | Unique ID for deduplication \(within 7-day window\) |
|
||||
| `appVersion` | string | No | Application version string |
|
||||
| `platform` | string | No | Platform \(e.g., "Web", "iOS", "Android"\) |
|
||||
| `country` | string | No | Two-letter country code |
|
||||
| `language` | string | No | Language code \(e.g., "en"\) |
|
||||
| `ip` | string | No | IP address for geo-location |
|
||||
| `price` | string | No | Price of the item purchased |
|
||||
| `quantity` | string | No | Quantity of items purchased |
|
||||
| `revenue` | string | No | Revenue amount |
|
||||
| `productId` | string | No | Product identifier |
|
||||
| `revenueType` | string | No | Revenue type \(e.g., "purchase", "refund"\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `code` | number | Response code \(200 for success\) |
|
||||
| `eventsIngested` | number | Number of events ingested |
|
||||
| `payloadSizeBytes` | number | Size of the payload in bytes |
|
||||
| `serverUploadTime` | number | Server upload timestamp |
|
||||
|
||||
### `amplitude_identify_user`
|
||||
|
||||
Set user properties in Amplitude using the Identify API. Supports $set, $setOnce, $add, $append, $unset operations.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `userId` | string | No | User ID \(required if no device_id\) |
|
||||
| `deviceId` | string | No | Device ID \(required if no user_id\) |
|
||||
| `userProperties` | string | Yes | JSON object of user properties. Use operations like $set, $setOnce, $add, $append, $unset. |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `code` | number | HTTP response status code |
|
||||
| `message` | string | Response message |
|
||||
|
||||
### `amplitude_group_identify`
|
||||
|
||||
Set group-level properties in Amplitude. Supports $set, $setOnce, $add, $append, $unset operations.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `groupType` | string | Yes | Group classification \(e.g., "company", "org_id"\) |
|
||||
| `groupValue` | string | Yes | Specific group identifier \(e.g., "Acme Corp"\) |
|
||||
| `groupProperties` | string | Yes | JSON object of group properties. Use operations like $set, $setOnce, $add, $append, $unset. |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `code` | number | HTTP response status code |
|
||||
| `message` | string | Response message |
|
||||
|
||||
### `amplitude_user_search`
|
||||
|
||||
Search for a user by User ID, Device ID, or Amplitude ID using the Dashboard REST API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `user` | string | Yes | User ID, Device ID, or Amplitude ID to search for |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `matches` | array | List of matching users |
|
||||
| ↳ `amplitudeId` | number | Amplitude internal user ID |
|
||||
| ↳ `userId` | string | External user ID |
|
||||
| `type` | string | Match type \(e.g., match_user_or_device_id\) |
|
||||
|
||||
### `amplitude_user_activity`
|
||||
|
||||
Get the event stream for a specific user by their Amplitude ID.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `amplitudeId` | string | Yes | Amplitude internal user ID |
|
||||
| `offset` | string | No | Offset for pagination \(default 0\) |
|
||||
| `limit` | string | No | Maximum number of events to return \(default 1000, max 1000\) |
|
||||
| `direction` | string | No | Sort direction: "latest" or "earliest" \(default: latest\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `events` | array | List of user events |
|
||||
| ↳ `eventType` | string | Type of event |
|
||||
| ↳ `eventTime` | string | Event timestamp |
|
||||
| ↳ `eventProperties` | json | Custom event properties |
|
||||
| ↳ `userProperties` | json | User properties at event time |
|
||||
| ↳ `sessionId` | number | Session ID |
|
||||
| ↳ `platform` | string | Platform |
|
||||
| ↳ `country` | string | Country |
|
||||
| ↳ `city` | string | City |
|
||||
| `userData` | json | User metadata |
|
||||
| ↳ `userId` | string | External user ID |
|
||||
| ↳ `canonicalAmplitudeId` | number | Canonical Amplitude ID |
|
||||
| ↳ `numEvents` | number | Total event count |
|
||||
| ↳ `numSessions` | number | Total session count |
|
||||
| ↳ `platform` | string | Primary platform |
|
||||
| ↳ `country` | string | Country |
|
||||
|
||||
### `amplitude_user_profile`
|
||||
|
||||
Get a user profile including properties, cohort memberships, and computed properties.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `userId` | string | No | External user ID \(required if no device_id\) |
|
||||
| `deviceId` | string | No | Device ID \(required if no user_id\) |
|
||||
| `getAmpProps` | string | No | Include Amplitude user properties \(true/false, default: false\) |
|
||||
| `getCohortIds` | string | No | Include cohort IDs the user belongs to \(true/false, default: false\) |
|
||||
| `getComputations` | string | No | Include computed user properties \(true/false, default: false\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `userId` | string | External user ID |
|
||||
| `deviceId` | string | Device ID |
|
||||
| `ampProps` | json | Amplitude user properties \(library, first_used, last_used, custom properties\) |
|
||||
| `cohortIds` | array | List of cohort IDs the user belongs to |
|
||||
| `computations` | json | Computed user properties |
|
||||
|
||||
### `amplitude_event_segmentation`
|
||||
|
||||
Query event analytics data with segmentation. Get event counts, uniques, averages, and more.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `eventType` | string | Yes | Event type name to analyze |
|
||||
| `start` | string | Yes | Start date in YYYYMMDD format |
|
||||
| `end` | string | Yes | End date in YYYYMMDD format |
|
||||
| `metric` | string | No | Metric type: uniques, totals, pct_dau, average, histogram, sums, value_avg, or formula \(default: uniques\) |
|
||||
| `interval` | string | No | Time interval: 1 \(daily\), 7 \(weekly\), or 30 \(monthly\) |
|
||||
| `groupBy` | string | No | Property name to group by \(prefix custom user properties with "gp:"\) |
|
||||
| `limit` | string | No | Maximum number of group-by values \(max 1000\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `series` | json | Time-series data arrays indexed by series |
|
||||
| `seriesLabels` | array | Labels for each data series |
|
||||
| `seriesCollapsed` | json | Collapsed aggregate totals per series |
|
||||
| `xValues` | array | Date values for the x-axis |
|
||||
|
||||
### `amplitude_get_active_users`
|
||||
|
||||
Get active or new user counts over a date range from the Dashboard REST API.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `start` | string | Yes | Start date in YYYYMMDD format |
|
||||
| `end` | string | Yes | End date in YYYYMMDD format |
|
||||
| `metric` | string | No | Metric type: "active" or "new" \(default: active\) |
|
||||
| `interval` | string | No | Time interval: 1 \(daily\), 7 \(weekly\), or 30 \(monthly\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `series` | json | Array of data series with user counts per time interval |
|
||||
| `seriesMeta` | array | Metadata labels for each data series \(e.g., segment names\) |
|
||||
| `xValues` | array | Date values for the x-axis |
|
||||
|
||||
### `amplitude_realtime_active_users`
|
||||
|
||||
Get real-time active user counts at 5-minute granularity for the last 2 days.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `series` | json | Array of data series with active user counts at 5-minute intervals |
|
||||
| `seriesLabels` | array | Labels for each series \(e.g., "Today", "Yesterday"\) |
|
||||
| `xValues` | array | Time values for the x-axis \(e.g., "15:00", "15:05"\) |
|
||||
|
||||
### `amplitude_list_events`
|
||||
|
||||
List all event types in the Amplitude project with their weekly totals and unique counts.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `events` | array | List of event types in the project |
|
||||
| ↳ `value` | string | Event type name |
|
||||
| ↳ `displayName` | string | Event display name |
|
||||
| ↳ `totals` | number | Weekly total count |
|
||||
| ↳ `hidden` | boolean | Whether the event is hidden |
|
||||
| ↳ `deleted` | boolean | Whether the event is deleted |
|
||||
|
||||
### `amplitude_get_revenue`
|
||||
|
||||
Get revenue LTV data including ARPU, ARPPU, total revenue, and paying user counts.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Amplitude API Key |
|
||||
| `secretKey` | string | Yes | Amplitude Secret Key |
|
||||
| `start` | string | Yes | Start date in YYYYMMDD format |
|
||||
| `end` | string | Yes | End date in YYYYMMDD format |
|
||||
| `metric` | string | No | Metric: 0 \(ARPU\), 1 \(ARPPU\), 2 \(Total Revenue\), 3 \(Paying Users\) |
|
||||
| `interval` | string | No | Time interval: 1 \(daily\), 7 \(weekly\), or 30 \(monthly\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `series` | json | Array of revenue data series |
|
||||
| `seriesLabels` | array | Labels for each data series |
|
||||
| `xValues` | array | Date values for the x-axis |
|
||||
|
||||
|
||||
@@ -7,7 +7,7 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="databricks"
|
||||
color="#FF3621"
|
||||
color="#F9F7F4"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
|
||||
84
apps/docs/content/docs/en/tools/google_pagespeed.mdx
Normal file
84
apps/docs/content/docs/en/tools/google_pagespeed.mdx
Normal file
@@ -0,0 +1,84 @@
|
||||
---
|
||||
title: Google PageSpeed
|
||||
description: Analyze webpage performance with Google PageSpeed Insights
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="google_pagespeed"
|
||||
color="#E0E0E0"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[Google PageSpeed Insights](https://pagespeed.web.dev/) is a web performance analysis tool powered by Lighthouse that evaluates the quality of web pages across multiple dimensions including performance, accessibility, SEO, and best practices.
|
||||
|
||||
With the Google PageSpeed integration in Sim, you can:
|
||||
|
||||
- **Analyze webpage performance**: Get detailed performance scores and metrics for any public URL, including First Contentful Paint, Largest Contentful Paint, and Speed Index
|
||||
- **Evaluate accessibility**: Check how well a webpage meets accessibility standards and identify areas for improvement
|
||||
- **Audit SEO**: Assess a page's search engine optimization and discover opportunities to improve rankings
|
||||
- **Review best practices**: Verify that a webpage follows modern web development best practices
|
||||
- **Compare strategies**: Run analyses using either desktop or mobile strategies to understand performance across device types
|
||||
- **Localize results**: Retrieve analysis results in different locales for internationalized reporting
|
||||
|
||||
In Sim, the Google PageSpeed integration enables your agents to programmatically audit web pages as part of automated workflows. This is useful for monitoring site performance over time, triggering alerts when scores drop below thresholds, generating performance reports, and ensuring that deployed changes meet quality standards before release.
|
||||
|
||||
### Getting Your API Key
|
||||
|
||||
1. Go to the [Google Cloud Console](https://console.cloud.google.com/)
|
||||
2. Create or select a project
|
||||
3. Enable the **PageSpeed Insights API** from the API Library
|
||||
4. Navigate to **Credentials** and create an API key
|
||||
5. Use the API key in the Sim block configuration
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Analyze web pages for performance, accessibility, SEO, and best practices using Google PageSpeed Insights API powered by Lighthouse.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `google_pagespeed_analyze`
|
||||
|
||||
Analyze a webpage for performance, accessibility, SEO, and best practices using Google PageSpeed Insights.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Google PageSpeed Insights API Key |
|
||||
| `url` | string | Yes | The URL of the webpage to analyze |
|
||||
| `category` | string | No | Lighthouse categories to analyze \(comma-separated\): performance, accessibility, best-practices, seo |
|
||||
| `strategy` | string | No | Analysis strategy: desktop or mobile |
|
||||
| `locale` | string | No | Locale for results \(e.g., en, fr, de\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `finalUrl` | string | The final URL after redirects |
|
||||
| `performanceScore` | number | Performance category score \(0-1\) |
|
||||
| `accessibilityScore` | number | Accessibility category score \(0-1\) |
|
||||
| `bestPracticesScore` | number | Best Practices category score \(0-1\) |
|
||||
| `seoScore` | number | SEO category score \(0-1\) |
|
||||
| `firstContentfulPaint` | string | Time to First Contentful Paint \(display value\) |
|
||||
| `firstContentfulPaintMs` | number | Time to First Contentful Paint in milliseconds |
|
||||
| `largestContentfulPaint` | string | Time to Largest Contentful Paint \(display value\) |
|
||||
| `largestContentfulPaintMs` | number | Time to Largest Contentful Paint in milliseconds |
|
||||
| `totalBlockingTime` | string | Total Blocking Time \(display value\) |
|
||||
| `totalBlockingTimeMs` | number | Total Blocking Time in milliseconds |
|
||||
| `cumulativeLayoutShift` | string | Cumulative Layout Shift \(display value\) |
|
||||
| `cumulativeLayoutShiftValue` | number | Cumulative Layout Shift numeric value |
|
||||
| `speedIndex` | string | Speed Index \(display value\) |
|
||||
| `speedIndexMs` | number | Speed Index in milliseconds |
|
||||
| `interactive` | string | Time to Interactive \(display value\) |
|
||||
| `interactiveMs` | number | Time to Interactive in milliseconds |
|
||||
| `overallCategory` | string | Overall loading experience category \(FAST, AVERAGE, SLOW, or NONE\) |
|
||||
| `analysisTimestamp` | string | UTC timestamp of the analysis |
|
||||
| `lighthouseVersion` | string | Version of Lighthouse used for the analysis |
|
||||
|
||||
|
||||
@@ -35,541 +35,472 @@ Integrate Greenhouse into the workflow. List and retrieve candidates, jobs, appl
|
||||
|
||||
### `greenhouse_list_candidates`
|
||||
|
||||
Lists candidates from Greenhouse with optional filtering by date, job, or email
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
| `created_after` | string | No | Return only candidates created at or after this ISO 8601 timestamp |
|
||||
| `created_before` | string | No | Return only candidates created before this ISO 8601 timestamp |
|
||||
| `updated_after` | string | No | Return only candidates updated at or after this ISO 8601 timestamp |
|
||||
| `updated_before` | string | No | Return only candidates updated before this ISO 8601 timestamp |
|
||||
| `job_id` | string | No | Filter to candidates who applied to this job ID \(excludes prospects\) |
|
||||
| `email` | string | No | Filter to candidates with this email address |
|
||||
| `candidate_ids` | string | No | Comma-separated candidate IDs to retrieve \(max 50\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `candidates` | array | List of candidates |
|
||||
| ↳ `id` | number | Candidate ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `company` | string | Current employer |
|
||||
| ↳ `title` | string | Current job title |
|
||||
| ↳ `is_private` | boolean | Whether candidate is private |
|
||||
| ↳ `can_email` | boolean | Whether candidate can be emailed |
|
||||
| ↳ `email_addresses` | array | Email addresses |
|
||||
| ↳ `value` | string | Email address |
|
||||
| ↳ `type` | string | Email type \(personal, work, other\) |
|
||||
| ↳ `tags` | array | Candidate tags |
|
||||
| ↳ `application_ids` | array | Associated application IDs |
|
||||
| ↳ `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| ↳ `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| ↳ `last_activity` | string | Last activity timestamp \(ISO 8601\) |
|
||||
| `count` | number | Number of candidates returned |
|
||||
|
||||
### `greenhouse_get_candidate`
|
||||
|
||||
Retrieves a specific candidate by ID with full details including contact info, education, and employment history
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `candidateId` | string | Yes | The ID of the candidate to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Candidate ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `company` | string | Current employer |
|
||||
| `title` | string | Current job title |
|
||||
| `is_private` | boolean | Whether candidate is private |
|
||||
| `can_email` | boolean | Whether candidate can be emailed |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `last_activity` | string | Last activity timestamp \(ISO 8601\) |
|
||||
| `email_addresses` | array | Email addresses |
|
||||
| ↳ `value` | string | Email address |
|
||||
| ↳ `type` | string | Type \(personal, work, other\) |
|
||||
| `phone_numbers` | array | Phone numbers |
|
||||
| ↳ `value` | string | Phone number |
|
||||
| ↳ `type` | string | Type \(home, work, mobile, skype, other\) |
|
||||
| `addresses` | array | Addresses |
|
||||
| ↳ `value` | string | Address |
|
||||
| ↳ `type` | string | Type \(home, work, other\) |
|
||||
| `website_addresses` | array | Website addresses |
|
||||
| ↳ `value` | string | URL |
|
||||
| ↳ `type` | string | Type \(personal, company, portfolio, blog, other\) |
|
||||
| `social_media_addresses` | array | Social media profiles |
|
||||
| ↳ `value` | string | URL or handle |
|
||||
| `tags` | array | Tags |
|
||||
| `application_ids` | array | Associated application IDs |
|
||||
| `recruiter` | object | Assigned recruiter |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| `coordinator` | object | Assigned coordinator |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| `attachments` | array | File attachments \(URLs expire after 7 days\) |
|
||||
| ↳ `filename` | string | File name |
|
||||
| ↳ `url` | string | Download URL \(expires after 7 days\) |
|
||||
| ↳ `type` | string | Type \(resume, cover_letter, offer_packet, other\) |
|
||||
| ↳ `created_at` | string | Upload timestamp |
|
||||
| `educations` | array | Education history |
|
||||
| ↳ `id` | number | Education record ID |
|
||||
| ↳ `school_name` | string | School name |
|
||||
| ↳ `degree` | string | Degree type |
|
||||
| ↳ `discipline` | string | Field of study |
|
||||
| ↳ `start_date` | string | Start date \(ISO 8601\) |
|
||||
| ↳ `end_date` | string | End date \(ISO 8601\) |
|
||||
| `employments` | array | Employment history |
|
||||
| ↳ `id` | number | Employment record ID |
|
||||
| ↳ `company_name` | string | Company name |
|
||||
| ↳ `title` | string | Job title |
|
||||
| ↳ `start_date` | string | Start date \(ISO 8601\) |
|
||||
| ↳ `end_date` | string | End date \(ISO 8601\) |
|
||||
| `custom_fields` | object | Custom field values |
|
||||
|
||||
### `greenhouse_list_jobs`
|
||||
|
||||
Lists jobs from Greenhouse with optional filtering by status, department, or office
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
| `status` | string | No | Filter by job status \(open, closed, draft\) |
|
||||
| `created_after` | string | No | Return only jobs created at or after this ISO 8601 timestamp |
|
||||
| `created_before` | string | No | Return only jobs created before this ISO 8601 timestamp |
|
||||
| `updated_after` | string | No | Return only jobs updated at or after this ISO 8601 timestamp |
|
||||
| `updated_before` | string | No | Return only jobs updated before this ISO 8601 timestamp |
|
||||
| `department_id` | string | No | Filter to jobs in this department ID |
|
||||
| `office_id` | string | No | Filter to jobs in this office ID |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `jobs` | array | List of jobs |
|
||||
| ↳ `id` | number | Job ID |
|
||||
| ↳ `name` | string | Job title |
|
||||
| ↳ `status` | string | Job status \(open, closed, draft\) |
|
||||
| ↳ `confidential` | boolean | Whether the job is confidential |
|
||||
| ↳ `departments` | array | Associated departments |
|
||||
| ↳ `id` | number | Department ID |
|
||||
| ↳ `name` | string | Department name |
|
||||
| ↳ `offices` | array | Associated offices |
|
||||
| ↳ `id` | number | Office ID |
|
||||
| ↳ `name` | string | Office name |
|
||||
| ↳ `opened_at` | string | Date job was opened \(ISO 8601\) |
|
||||
| ↳ `closed_at` | string | Date job was closed \(ISO 8601\) |
|
||||
| ↳ `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| ↳ `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `count` | number | Number of jobs returned |
|
||||
|
||||
### `greenhouse_get_job`
|
||||
|
||||
Retrieves a specific job by ID with full details including hiring team, openings, and custom fields
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `jobId` | string | Yes | The ID of the job to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `id` | number | Job ID |
|
||||
| `name` | string | Job title |
|
||||
| `requisition_id` | string | External requisition ID |
|
||||
| `status` | string | Job status \(open, closed, draft\) |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `opened_at` | string | Date job was opened \(ISO 8601\) |
|
||||
| `closed_at` | string | Date job was closed \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `is_template` | boolean | Whether this is a job template |
|
||||
| `notes` | string | Hiring plan notes \(may contain HTML\) |
|
||||
| `departments` | array | Associated departments |
|
||||
| ↳ `id` | number | Department ID |
|
||||
| ↳ `name` | string | Department name |
|
||||
| ↳ `parent_id` | number | Parent department ID |
|
||||
| `offices` | array | Associated offices |
|
||||
| ↳ `id` | number | Office ID |
|
||||
| ↳ `name` | string | Office name |
|
||||
| ↳ `location` | object | Office location |
|
||||
| ↳ `name` | string | Location name |
|
||||
| `hiring_team` | object | Hiring team members |
|
||||
| ↳ `hiring_managers` | array | Hiring managers |
|
||||
| ↳ `recruiters` | array | Recruiters \(includes responsible flag\) |
|
||||
| ↳ `coordinators` | array | Coordinators \(includes responsible flag\) |
|
||||
| ↳ `sourcers` | array | Sourcers |
|
||||
| `openings` | array | Job openings/slots |
|
||||
| ↳ `id` | number | Opening internal ID |
|
||||
| ↳ `opening_id` | string | Custom opening identifier |
|
||||
| ↳ `status` | string | Opening status \(open, closed\) |
|
||||
| ↳ `opened_at` | string | Date opened \(ISO 8601\) |
|
||||
| ↳ `closed_at` | string | Date closed \(ISO 8601\) |
|
||||
| ↳ `application_id` | number | Hired application ID |
|
||||
| ↳ `close_reason` | object | Reason for closing |
|
||||
| ↳ `id` | number | Close reason ID |
|
||||
| ↳ `name` | string | Close reason name |
|
||||
| `custom_fields` | object | Custom field values |
|
||||
|
||||
### `greenhouse_list_applications`
|
||||
|
||||
Lists applications from Greenhouse with optional filtering by job, status, or date
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
| `job_id` | string | No | Filter applications by job ID |
|
||||
| `status` | string | No | Filter by status \(active, converted, hired, rejected\) |
|
||||
| `created_after` | string | No | Return only applications created at or after this ISO 8601 timestamp |
|
||||
| `created_before` | string | No | Return only applications created before this ISO 8601 timestamp |
|
||||
| `last_activity_after` | string | No | Return only applications with activity at or after this ISO 8601 timestamp |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `applications` | array | List of applications |
|
||||
| ↳ `id` | number | Application ID |
|
||||
| ↳ `candidate_id` | number | Associated candidate ID |
|
||||
| ↳ `prospect` | boolean | Whether this is a prospect application |
|
||||
| ↳ `status` | string | Status \(active, converted, hired, rejected\) |
|
||||
| ↳ `current_stage` | object | Current interview stage |
|
||||
| ↳ `id` | number | Stage ID |
|
||||
| ↳ `name` | string | Stage name |
|
||||
| ↳ `jobs` | array | Associated jobs |
|
||||
| ↳ `id` | number | Job ID |
|
||||
| ↳ `name` | string | Job name |
|
||||
| ↳ `applied_at` | string | Application date \(ISO 8601\) |
|
||||
| ↳ `rejected_at` | string | Rejection date \(ISO 8601\) |
|
||||
| ↳ `last_activity_at` | string | Last activity date \(ISO 8601\) |
|
||||
| `count` | number | Number of applications returned |
|
||||
|
||||
### `greenhouse_get_application`
|
||||
|
||||
Retrieves a specific application by ID with full details including source, stage, answers, and attachments
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `applicationId` | string | Yes | The ID of the application to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `id` | number | Application ID |
|
||||
| `candidate_id` | number | Associated candidate ID |
|
||||
| `prospect` | boolean | Whether this is a prospect application |
|
||||
| `status` | string | Status \(active, converted, hired, rejected\) |
|
||||
| `applied_at` | string | Application date \(ISO 8601\) |
|
||||
| `rejected_at` | string | Rejection date \(ISO 8601\) |
|
||||
| `last_activity_at` | string | Last activity date \(ISO 8601\) |
|
||||
| `location` | object | Candidate location |
|
||||
| ↳ `address` | string | Location address |
|
||||
| `source` | object | Application source |
|
||||
| ↳ `id` | number | Source ID |
|
||||
| ↳ `public_name` | string | Source name |
|
||||
| `credited_to` | object | User credited for the application |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| `recruiter` | object | Assigned recruiter |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| `coordinator` | object | Assigned coordinator |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| `current_stage` | object | Current interview stage \(null when hired\) |
|
||||
| ↳ `id` | number | Stage ID |
|
||||
| ↳ `name` | string | Stage name |
|
||||
| `rejection_reason` | object | Rejection reason |
|
||||
| ↳ `id` | number | Rejection reason ID |
|
||||
| ↳ `name` | string | Rejection reason name |
|
||||
| ↳ `type` | object | Rejection reason type |
|
||||
| ↳ `id` | number | Type ID |
|
||||
| ↳ `name` | string | Type name |
|
||||
| `jobs` | array | Associated jobs |
|
||||
| ↳ `id` | number | Job ID |
|
||||
| ↳ `name` | string | Job name |
|
||||
| `job_post_id` | number | Job post ID |
|
||||
| `answers` | array | Application question answers |
|
||||
| ↳ `question` | string | Question text |
|
||||
| ↳ `answer` | string | Answer text |
|
||||
| `attachments` | array | File attachments \(URLs expire after 7 days\) |
|
||||
| ↳ `filename` | string | File name |
|
||||
| ↳ `url` | string | Download URL \(expires after 7 days\) |
|
||||
| ↳ `type` | string | Type \(resume, cover_letter, offer_packet, other\) |
|
||||
| ↳ `created_at` | string | Upload timestamp |
|
||||
| `custom_fields` | object | Custom field values |
|
||||
|
||||
### `greenhouse_list_users`
|
||||
|
||||
Lists Greenhouse users \(recruiters, hiring managers, admins\) with optional filtering
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
| `created_after` | string | No | Return only users created at or after this ISO 8601 timestamp |
|
||||
| `created_before` | string | No | Return only users created before this ISO 8601 timestamp |
|
||||
| `updated_after` | string | No | Return only users updated at or after this ISO 8601 timestamp |
|
||||
| `updated_before` | string | No | Return only users updated before this ISO 8601 timestamp |
|
||||
| `email` | string | No | Filter by email address |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `users` | array | List of Greenhouse users |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `primary_email_address` | string | Primary email |
|
||||
| ↳ `disabled` | boolean | Whether the user is disabled |
|
||||
| ↳ `site_admin` | boolean | Whether the user is a site admin |
|
||||
| ↳ `emails` | array | All email addresses |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| ↳ `linked_candidate_ids` | array | IDs of candidates linked to this user |
|
||||
| ↳ `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| ↳ `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `count` | number | Number of users returned |
|
||||
|
||||
### `greenhouse_get_user`
|
||||
|
||||
Retrieves a specific Greenhouse user by ID
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `userId` | string | Yes | The ID of the user to retrieve |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `id` | number | User ID |
|
||||
| `name` | string | Full name |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `emails` | array | All email addresses |
|
||||
| `employee_id` | string | Employee ID |
|
||||
| `linked_candidate_ids` | array | IDs of candidates linked to this user |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
|
||||
### `greenhouse_list_departments`
|
||||
|
||||
Lists all departments configured in Greenhouse
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `departments` | array | List of departments |
|
||||
| ↳ `id` | number | Department ID |
|
||||
| ↳ `name` | string | Department name |
|
||||
| ↳ `parent_id` | number | Parent department ID |
|
||||
| ↳ `child_ids` | array | Child department IDs |
|
||||
| ↳ `external_id` | string | External system ID |
|
||||
| `count` | number | Number of departments returned |
|
||||
|
||||
### `greenhouse_list_offices`
|
||||
|
||||
Lists all offices configured in Greenhouse
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `offices` | array | List of offices |
|
||||
| ↳ `id` | number | Office ID |
|
||||
| ↳ `name` | string | Office name |
|
||||
| ↳ `location` | object | Office location |
|
||||
| ↳ `name` | string | Location name |
|
||||
| ↳ `primary_contact_user_id` | number | Primary contact user ID |
|
||||
| ↳ `parent_id` | number | Parent office ID |
|
||||
| ↳ `child_ids` | array | Child office IDs |
|
||||
| ↳ `external_id` | string | External system ID |
|
||||
| `count` | number | Number of offices returned |
|
||||
|
||||
### `greenhouse_list_job_stages`
|
||||
|
||||
Lists all interview stages for a specific job in Greenhouse
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | Greenhouse Harvest API key |
|
||||
| `jobId` | string | Yes | The job ID to list stages for |
|
||||
| `per_page` | number | No | Number of results per page \(1-500, default 100\) |
|
||||
| `page` | number | No | Page number for pagination |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `candidates` | json | List of candidates |
|
||||
| `jobs` | json | List of jobs |
|
||||
| `applications` | json | List of applications |
|
||||
| `users` | json | List of users |
|
||||
| `departments` | json | List of departments |
|
||||
| `offices` | json | List of offices |
|
||||
| `stages` | json | List of job stages |
|
||||
| `count` | number | Number of results returned |
|
||||
| `id` | number | Resource ID |
|
||||
| `first_name` | string | First name |
|
||||
| `last_name` | string | Last name |
|
||||
| `name` | string | Resource name |
|
||||
| `status` | string | Status |
|
||||
| `email_addresses` | json | Email addresses |
|
||||
| `phone_numbers` | json | Phone numbers |
|
||||
| `tags` | json | Tags |
|
||||
| `application_ids` | json | Associated application IDs |
|
||||
| `recruiter` | json | Assigned recruiter |
|
||||
| `coordinator` | json | Assigned coordinator |
|
||||
| `current_stage` | json | Current interview stage |
|
||||
| `source` | json | Application source |
|
||||
| `hiring_team` | json | Hiring team members |
|
||||
| `openings` | json | Job openings |
|
||||
| `custom_fields` | json | Custom field values |
|
||||
| `attachments` | json | File attachments |
|
||||
| `educations` | json | Education history |
|
||||
| `employments` | json | Employment history |
|
||||
| `answers` | json | Application question answers |
|
||||
| `prospect` | boolean | Whether this is a prospect |
|
||||
| `confidential` | boolean | Whether the job is confidential |
|
||||
| `is_private` | boolean | Whether the candidate is private |
|
||||
| `can_email` | boolean | Whether the candidate can be emailed |
|
||||
| `disabled` | boolean | Whether the user is disabled |
|
||||
| `site_admin` | boolean | Whether the user is a site admin |
|
||||
| `primary_email_address` | string | Primary email address |
|
||||
| `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| `stages` | array | List of job stages in order |
|
||||
| ↳ `id` | number | Stage ID |
|
||||
| ↳ `name` | string | Stage name |
|
||||
| ↳ `created_at` | string | Creation timestamp \(ISO 8601\) |
|
||||
| ↳ `updated_at` | string | Last updated timestamp \(ISO 8601\) |
|
||||
| ↳ `job_id` | number | Associated job ID |
|
||||
| ↳ `priority` | number | Stage order priority |
|
||||
| ↳ `active` | boolean | Whether the stage is active |
|
||||
| ↳ `interviews` | array | Interview steps in this stage |
|
||||
| ↳ `id` | number | Interview ID |
|
||||
| ↳ `name` | string | Interview name |
|
||||
| ↳ `schedulable` | boolean | Whether the interview is schedulable |
|
||||
| ↳ `estimated_minutes` | number | Estimated duration in minutes |
|
||||
| ↳ `default_interviewer_users` | array | Default interviewers |
|
||||
| ↳ `id` | number | User ID |
|
||||
| ↳ `name` | string | Full name |
|
||||
| ↳ `first_name` | string | First name |
|
||||
| ↳ `last_name` | string | Last name |
|
||||
| ↳ `employee_id` | string | Employee ID |
|
||||
| ↳ `interview_kit` | object | Interview kit details |
|
||||
| ↳ `id` | number | Kit ID |
|
||||
| ↳ `content` | string | Kit content \(HTML\) |
|
||||
| ↳ `questions` | array | Interview kit questions |
|
||||
| ↳ `id` | number | Question ID |
|
||||
| ↳ `question` | string | Question text |
|
||||
| `count` | number | Number of stages returned |
|
||||
|
||||
|
||||
|
||||
@@ -6,6 +6,7 @@
|
||||
"airtable",
|
||||
"airweave",
|
||||
"algolia",
|
||||
"amplitude",
|
||||
"apify",
|
||||
"apollo",
|
||||
"arxiv",
|
||||
@@ -50,6 +51,7 @@
|
||||
"google_forms",
|
||||
"google_groups",
|
||||
"google_maps",
|
||||
"google_pagespeed",
|
||||
"google_search",
|
||||
"google_sheets",
|
||||
"google_slides",
|
||||
@@ -97,6 +99,7 @@
|
||||
"onepassword",
|
||||
"openai",
|
||||
"outlook",
|
||||
"pagerduty",
|
||||
"parallel_ai",
|
||||
"perplexity",
|
||||
"pinecone",
|
||||
|
||||
217
apps/docs/content/docs/en/tools/pagerduty.mdx
Normal file
217
apps/docs/content/docs/en/tools/pagerduty.mdx
Normal file
@@ -0,0 +1,217 @@
|
||||
---
|
||||
title: PagerDuty
|
||||
description: Manage incidents and on-call schedules with PagerDuty
|
||||
---
|
||||
|
||||
import { BlockInfoCard } from "@/components/ui/block-info-card"
|
||||
|
||||
<BlockInfoCard
|
||||
type="pagerduty"
|
||||
color="#06AC38"
|
||||
/>
|
||||
|
||||
{/* MANUAL-CONTENT-START:intro */}
|
||||
[PagerDuty](https://www.pagerduty.com/) is a leading incident management platform that helps engineering and operations teams detect, triage, and resolve infrastructure and application issues in real time. PagerDuty integrates with monitoring tools, orchestrates on-call schedules, and ensures the right people are alerted when incidents occur.
|
||||
|
||||
The PagerDuty integration in Sim connects with the PagerDuty REST API v2 using API key authentication, enabling your agents to manage the full incident lifecycle and query on-call information programmatically.
|
||||
|
||||
With the PagerDuty integration, your agents can:
|
||||
|
||||
- **List and filter incidents**: Retrieve incidents filtered by status (triggered, acknowledged, resolved), service, date range, and sort order to monitor your operational health
|
||||
- **Create incidents**: Trigger new incidents on specific services with custom titles, descriptions, urgency levels, and assignees directly from your workflows
|
||||
- **Update incidents**: Acknowledge or resolve incidents, change urgency, and add resolution notes to keep your incident management in sync with automated processes
|
||||
- **Add notes to incidents**: Attach contextual information, investigation findings, or automated diagnostics as notes on existing incidents
|
||||
- **List services**: Query your PagerDuty service catalog to discover service IDs and metadata for use in other operations
|
||||
- **Check on-call schedules**: Retrieve current on-call entries filtered by escalation policy or schedule to determine who is responsible at any given time
|
||||
|
||||
In Sim, the PagerDuty integration enables powerful incident automation scenarios. Your agents can automatically create incidents based on monitoring alerts, enrich incidents with diagnostic data from other tools, resolve incidents when automated remediation succeeds, or build escalation workflows that check on-call schedules and route notifications accordingly. By connecting Sim with PagerDuty, you can build intelligent agents that bridge the gap between detection and response, reducing mean time to resolution and ensuring consistent incident handling across your organization.
|
||||
{/* MANUAL-CONTENT-END */}
|
||||
|
||||
|
||||
## Usage Instructions
|
||||
|
||||
Integrate PagerDuty into your workflow to list, create, and update incidents, add notes, list services, and check on-call schedules.
|
||||
|
||||
|
||||
|
||||
## Tools
|
||||
|
||||
### `pagerduty_list_incidents`
|
||||
|
||||
List incidents from PagerDuty with optional filters.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `statuses` | string | No | Comma-separated statuses to filter \(triggered, acknowledged, resolved\) |
|
||||
| `serviceIds` | string | No | Comma-separated service IDs to filter |
|
||||
| `since` | string | No | Start date filter \(ISO 8601 format\) |
|
||||
| `until` | string | No | End date filter \(ISO 8601 format\) |
|
||||
| `sortBy` | string | No | Sort field \(e.g., created_at:desc\) |
|
||||
| `limit` | string | No | Maximum number of results \(max 100\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `incidents` | array | Array of incidents |
|
||||
| ↳ `id` | string | Incident ID |
|
||||
| ↳ `incidentNumber` | number | Incident number |
|
||||
| ↳ `title` | string | Incident title |
|
||||
| ↳ `status` | string | Incident status |
|
||||
| ↳ `urgency` | string | Incident urgency |
|
||||
| ↳ `createdAt` | string | Creation timestamp |
|
||||
| ↳ `updatedAt` | string | Last updated timestamp |
|
||||
| ↳ `serviceName` | string | Service name |
|
||||
| ↳ `serviceId` | string | Service ID |
|
||||
| ↳ `assigneeName` | string | Assignee name |
|
||||
| ↳ `assigneeId` | string | Assignee ID |
|
||||
| ↳ `escalationPolicyName` | string | Escalation policy name |
|
||||
| ↳ `htmlUrl` | string | PagerDuty web URL |
|
||||
| `total` | number | Total number of matching incidents |
|
||||
| `more` | boolean | Whether more results are available |
|
||||
|
||||
### `pagerduty_create_incident`
|
||||
|
||||
Create a new incident in PagerDuty.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `fromEmail` | string | Yes | Email address of a valid PagerDuty user |
|
||||
| `title` | string | Yes | Incident title/summary |
|
||||
| `serviceId` | string | Yes | ID of the PagerDuty service |
|
||||
| `urgency` | string | No | Urgency level \(high or low\) |
|
||||
| `body` | string | No | Detailed description of the incident |
|
||||
| `escalationPolicyId` | string | No | Escalation policy ID to assign |
|
||||
| `assigneeId` | string | No | User ID to assign the incident to |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | Created incident ID |
|
||||
| `incidentNumber` | number | Incident number |
|
||||
| `title` | string | Incident title |
|
||||
| `status` | string | Incident status |
|
||||
| `urgency` | string | Incident urgency |
|
||||
| `createdAt` | string | Creation timestamp |
|
||||
| `serviceName` | string | Service name |
|
||||
| `serviceId` | string | Service ID |
|
||||
| `htmlUrl` | string | PagerDuty web URL |
|
||||
|
||||
### `pagerduty_update_incident`
|
||||
|
||||
Update an incident in PagerDuty (acknowledge, resolve, change urgency, etc.).
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `fromEmail` | string | Yes | Email address of a valid PagerDuty user |
|
||||
| `incidentId` | string | Yes | ID of the incident to update |
|
||||
| `status` | string | No | New status \(acknowledged or resolved\) |
|
||||
| `title` | string | No | New incident title |
|
||||
| `urgency` | string | No | New urgency \(high or low\) |
|
||||
| `escalationLevel` | string | No | Escalation level to escalate to |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | Incident ID |
|
||||
| `incidentNumber` | number | Incident number |
|
||||
| `title` | string | Incident title |
|
||||
| `status` | string | Updated status |
|
||||
| `urgency` | string | Updated urgency |
|
||||
| `updatedAt` | string | Last updated timestamp |
|
||||
| `htmlUrl` | string | PagerDuty web URL |
|
||||
|
||||
### `pagerduty_add_note`
|
||||
|
||||
Add a note to an existing PagerDuty incident.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `fromEmail` | string | Yes | Email address of a valid PagerDuty user |
|
||||
| `incidentId` | string | Yes | ID of the incident to add the note to |
|
||||
| `content` | string | Yes | Note content text |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `id` | string | Note ID |
|
||||
| `content` | string | Note content |
|
||||
| `createdAt` | string | Creation timestamp |
|
||||
| `userName` | string | Name of the user who created the note |
|
||||
|
||||
### `pagerduty_list_services`
|
||||
|
||||
List services from PagerDuty with optional name filter.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `query` | string | No | Filter services by name |
|
||||
| `limit` | string | No | Maximum number of results \(max 100\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `services` | array | Array of services |
|
||||
| ↳ `id` | string | Service ID |
|
||||
| ↳ `name` | string | Service name |
|
||||
| ↳ `description` | string | Service description |
|
||||
| ↳ `status` | string | Service status |
|
||||
| ↳ `escalationPolicyName` | string | Escalation policy name |
|
||||
| ↳ `escalationPolicyId` | string | Escalation policy ID |
|
||||
| ↳ `createdAt` | string | Creation timestamp |
|
||||
| ↳ `htmlUrl` | string | PagerDuty web URL |
|
||||
| `total` | number | Total number of matching services |
|
||||
| `more` | boolean | Whether more results are available |
|
||||
|
||||
### `pagerduty_list_oncalls`
|
||||
|
||||
List current on-call entries from PagerDuty.
|
||||
|
||||
#### Input
|
||||
|
||||
| Parameter | Type | Required | Description |
|
||||
| --------- | ---- | -------- | ----------- |
|
||||
| `apiKey` | string | Yes | PagerDuty REST API Key |
|
||||
| `escalationPolicyIds` | string | No | Comma-separated escalation policy IDs to filter |
|
||||
| `scheduleIds` | string | No | Comma-separated schedule IDs to filter |
|
||||
| `since` | string | No | Start time filter \(ISO 8601 format\) |
|
||||
| `until` | string | No | End time filter \(ISO 8601 format\) |
|
||||
| `limit` | string | No | Maximum number of results \(max 100\) |
|
||||
|
||||
#### Output
|
||||
|
||||
| Parameter | Type | Description |
|
||||
| --------- | ---- | ----------- |
|
||||
| `oncalls` | array | Array of on-call entries |
|
||||
| ↳ `userName` | string | On-call user name |
|
||||
| ↳ `userId` | string | On-call user ID |
|
||||
| ↳ `escalationLevel` | number | Escalation level |
|
||||
| ↳ `escalationPolicyName` | string | Escalation policy name |
|
||||
| ↳ `escalationPolicyId` | string | Escalation policy ID |
|
||||
| ↳ `scheduleName` | string | Schedule name |
|
||||
| ↳ `scheduleId` | string | Schedule ID |
|
||||
| ↳ `start` | string | On-call start time |
|
||||
| ↳ `end` | string | On-call end time |
|
||||
| `total` | number | Total number of matching on-call entries |
|
||||
| `more` | boolean | Whether more results are available |
|
||||
|
||||
|
||||
94
apps/docs/content/docs/es/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/es/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim dashboard and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/es/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/es/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar, or use the dashboard to manage deployments.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/es/api-reference/meta.json
Normal file
16
apps/docs/content/docs/es/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
766
apps/docs/content/docs/es/api-reference/python.mdx
Normal file
766
apps/docs/content/docs/es/api-reference/python.mdx
Normal file
@@ -0,0 +1,766 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
El SDK oficial de Python para Sim te permite ejecutar flujos de trabajo programáticamente desde tus aplicaciones Python.
|
||||
|
||||
<Callout type="info">
|
||||
El SDK de Python es compatible con Python 3.8+ con soporte para ejecución asíncrona, limitación automática de velocidad con retroceso exponencial y seguimiento de uso.
|
||||
</Callout>
|
||||
|
||||
## Instalación
|
||||
|
||||
Instala el SDK usando pip:
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## Inicio rápido
|
||||
|
||||
Aquí tienes un ejemplo sencillo para empezar:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## Referencia de la API
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### Constructor
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `api_key` (str): Tu clave API de Sim
|
||||
- `base_url` (str, opcional): URL base para la API de Sim
|
||||
|
||||
#### Métodos
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
Ejecuta un flujo de trabajo con datos de entrada opcionales.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
|
||||
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
|
||||
- `timeout` (float, opcional): Tiempo de espera en segundos (predeterminado: 30.0)
|
||||
- `stream` (bool, opcional): Habilitar respuestas en streaming (predeterminado: False)
|
||||
- `selected_outputs` (list[str], opcional): Salidas de bloque para transmitir en formato `blockName.attribute` (p. ej., `["agent1.content"]`)
|
||||
- `async_execution` (bool, opcional): Ejecutar de forma asíncrona (predeterminado: False)
|
||||
|
||||
**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Cuando `async_execution=True`, devuelve inmediatamente un ID de tarea para sondeo. De lo contrario, espera a que se complete.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Obtener el estado de un flujo de trabajo (estado de implementación, etc.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `workflow_id` (str): El ID del flujo de trabajo
|
||||
|
||||
**Devuelve:** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Validar que un flujo de trabajo está listo para su ejecución.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `workflow_id` (str): El ID del flujo de trabajo
|
||||
|
||||
**Devuelve:** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
Obtener el estado de una ejecución de trabajo asíncrono.
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `task_id` (str): El ID de tarea devuelto de la ejecución asíncrona
|
||||
|
||||
**Devuelve:** `Dict[str, Any]`
|
||||
|
||||
**Campos de respuesta:**
|
||||
- `success` (bool): Si la solicitud fue exitosa
|
||||
- `taskId` (str): El ID de la tarea
|
||||
- `status` (str): Uno de `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): Contiene `startedAt`, `completedAt`, y `duration`
|
||||
- `output` (any, opcional): La salida del flujo de trabajo (cuando se completa)
|
||||
- `error` (any, opcional): Detalles del error (cuando falla)
|
||||
- `estimatedDuration` (int, opcional): Duración estimada en milisegundos (cuando está procesando/en cola)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Ejecutar un flujo de trabajo con reintento automático en errores de límite de velocidad usando retroceso exponencial.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Parámetros:**
|
||||
- `workflow_id` (str): El ID del flujo de trabajo a ejecutar
|
||||
- `input_data` (dict, opcional): Datos de entrada para pasar al flujo de trabajo
|
||||
- `timeout` (float, opcional): Tiempo de espera en segundos
|
||||
- `stream` (bool, opcional): Habilitar respuestas en streaming
|
||||
- `selected_outputs` (list, opcional): Salidas de bloque para transmitir
|
||||
- `async_execution` (bool, opcional): Ejecutar de forma asíncrona
|
||||
- `max_retries` (int, opcional): Número máximo de reintentos (predeterminado: 3)
|
||||
- `initial_delay` (float, opcional): Retraso inicial en segundos (predeterminado: 1.0)
|
||||
- `max_delay` (float, opcional): Retraso máximo en segundos (predeterminado: 30.0)
|
||||
- `backoff_multiplier` (float, opcional): Multiplicador de retroceso (predeterminado: 2.0)
|
||||
|
||||
**Devuelve:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
La lógica de reintento utiliza retroceso exponencial (1s → 2s → 4s → 8s...) con fluctuación de ±25% para evitar el efecto de manada. Si la API proporciona un encabezado `retry-after`, se utilizará en su lugar.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Obtiene la información actual del límite de tasa de la última respuesta de la API.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Devuelve:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Obtiene los límites de uso actuales y la información de cuota para tu cuenta.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Devuelve:** `UsageLimits`
|
||||
|
||||
**Estructura de respuesta:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
Actualiza la clave API.
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
Actualiza la URL base.
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
Cierra la sesión HTTP subyacente.
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## Clases de datos
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Códigos de error comunes:**
|
||||
- `UNAUTHORIZED`: Clave API inválida
|
||||
- `TIMEOUT`: Tiempo de espera agotado
|
||||
- `RATE_LIMIT_EXCEEDED`: Límite de tasa excedido
|
||||
- `USAGE_LIMIT_EXCEEDED`: Límite de uso excedido
|
||||
- `EXECUTION_ERROR`: Ejecución del flujo de trabajo fallida
|
||||
|
||||
## Ejemplos
|
||||
|
||||
### Ejecución básica de flujo de trabajo
|
||||
|
||||
<Steps>
|
||||
<Step title="Inicializar el cliente">
|
||||
Configura el SimStudioClient con tu clave API.
|
||||
</Step>
|
||||
<Step title="Validar el flujo de trabajo">
|
||||
Comprueba si el flujo de trabajo está desplegado y listo para su ejecución.
|
||||
</Step>
|
||||
<Step title="Ejecutar el flujo de trabajo">
|
||||
Ejecuta el flujo de trabajo con tus datos de entrada.
|
||||
</Step>
|
||||
<Step title="Manejar el resultado">
|
||||
Procesa el resultado de la ejecución y gestiona cualquier error.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### Manejo de errores
|
||||
|
||||
Maneja diferentes tipos de errores que pueden ocurrir durante la ejecución del flujo de trabajo:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### Uso del gestor de contexto
|
||||
|
||||
Usa el cliente como un gestor de contexto para manejar automáticamente la limpieza de recursos:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Ejecución de flujos de trabajo por lotes
|
||||
|
||||
Ejecuta múltiples flujos de trabajo de manera eficiente:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Ejecución asíncrona de flujos de trabajo
|
||||
|
||||
Ejecuta flujos de trabajo de forma asíncrona para tareas de larga duración:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Límite de tasa y reintentos
|
||||
|
||||
Maneja los límites de tasa automáticamente con retroceso exponencial:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Monitoreo de uso
|
||||
|
||||
Monitorea el uso de tu cuenta y sus límites:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Ejecución de flujo de trabajo en streaming
|
||||
|
||||
Ejecuta flujos de trabajo con respuestas en tiempo real:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
La respuesta en streaming sigue el formato de Server-Sent Events (SSE):
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Ejemplo de streaming con Flask:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Configuración del entorno
|
||||
|
||||
Configura el cliente usando variables de entorno:
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab value="Production">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Obtener tu clave API
|
||||
|
||||
<Steps>
|
||||
<Step title="Inicia sesión en Sim">
|
||||
Navega a [Sim](https://sim.ai) e inicia sesión en tu cuenta.
|
||||
</Step>
|
||||
<Step title="Abre tu flujo de trabajo">
|
||||
Navega al flujo de trabajo que quieres ejecutar programáticamente.
|
||||
</Step>
|
||||
<Step title="Despliega tu flujo de trabajo">
|
||||
Haz clic en "Deploy" para desplegar tu flujo de trabajo si aún no ha sido desplegado.
|
||||
</Step>
|
||||
<Step title="Crea o selecciona una clave API">
|
||||
Durante el proceso de despliegue, selecciona o crea una clave API.
|
||||
</Step>
|
||||
<Step title="Copia la clave API">
|
||||
Copia la clave API para usarla en tu aplicación Python.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Requisitos
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## Licencia
|
||||
|
||||
Apache-2.0
|
||||
1052
apps/docs/content/docs/es/api-reference/typescript.mdx
Normal file
1052
apps/docs/content/docs/es/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
24
apps/docs/content/docs/es/meta.json
Normal file
24
apps/docs/content/docs/es/meta.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"title": "Sim Documentation",
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
],
|
||||
"defaultOpen": false
|
||||
}
|
||||
94
apps/docs/content/docs/fr/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/fr/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim dashboard and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import os
import requests
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/fr/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/fr/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar, or use the dashboard to manage deployments.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/fr/api-reference/meta.json
Normal file
16
apps/docs/content/docs/fr/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
766
apps/docs/content/docs/fr/api-reference/python.mdx
Normal file
766
apps/docs/content/docs/fr/api-reference/python.mdx
Normal file
@@ -0,0 +1,766 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Le SDK Python officiel pour Sim vous permet d'exécuter des workflows de manière programmatique à partir de vos applications Python.
|
||||
|
||||
<Callout type="info">
|
||||
Le SDK Python prend en charge Python 3.8+ avec support d'exécution asynchrone, limitation automatique du débit avec backoff exponentiel, et suivi d'utilisation.
|
||||
</Callout>
|
||||
|
||||
## Installation
|
||||
|
||||
Installez le SDK en utilisant pip :
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## Démarrage rapide
|
||||
|
||||
Voici un exemple simple pour commencer :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## Référence de l'API
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### Constructeur
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `api_key` (str) : Votre clé API Sim
|
||||
- `base_url` (str, facultatif) : URL de base pour l'API Sim
|
||||
|
||||
#### Méthodes
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
Exécuter un workflow avec des données d'entrée facultatives.
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'identifiant du workflow à exécuter
|
||||
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (float, facultatif) : Délai d'expiration en secondes (par défaut : 30.0)
|
||||
- `stream` (bool, facultatif) : Activer les réponses en streaming (par défaut : False)
|
||||
- `selected_outputs` (list[str], facultatif) : Sorties de blocs à diffuser au format `blockName.attribute` (par exemple, `["agent1.content"]`)
|
||||
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone (par défaut : False)
|
||||
|
||||
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
Lorsque `async_execution=True`, retourne immédiatement un identifiant de tâche pour l'interrogation. Sinon, attend la fin de l'exécution.
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
Obtenir le statut d'un workflow (statut de déploiement, etc.).
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'identifiant du workflow
|
||||
|
||||
**Retourne :** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
Valider qu'un workflow est prêt pour l'exécution.
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'identifiant du workflow
|
||||
|
||||
**Retourne :** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
Obtenir le statut d'une exécution de tâche asynchrone.
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `task_id` (str) : L'identifiant de tâche retourné par l'exécution asynchrone
|
||||
|
||||
**Retourne :** `Dict[str, Any]`
|
||||
|
||||
**Champs de réponse :**
|
||||
- `success` (bool) : Si la requête a réussi
|
||||
- `taskId` (str) : L'identifiant de la tâche
|
||||
- `status` (str) : L'un des états suivants : `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict) : Contient `startedAt`, `completedAt`, et `duration`
|
||||
- `output` (any, facultatif) : La sortie du workflow (une fois terminé)
|
||||
- `error` (any, facultatif) : Détails de l'erreur (en cas d'échec)
|
||||
- `estimatedDuration` (int, facultatif) : Durée estimée en millisecondes (lors du traitement/mise en file d'attente)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
Exécuter un workflow avec réessai automatique en cas d'erreurs de limitation de débit, en utilisant un backoff exponentiel.
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**Paramètres :**
|
||||
- `workflow_id` (str) : L'identifiant du workflow à exécuter
|
||||
- `input_data` (dict, facultatif) : Données d'entrée à transmettre au workflow
|
||||
- `timeout` (float, facultatif) : Délai d'expiration en secondes
|
||||
- `stream` (bool, facultatif) : Activer les réponses en streaming
|
||||
- `selected_outputs` (list, facultatif) : Sorties de blocs à diffuser
|
||||
- `async_execution` (bool, facultatif) : Exécuter de manière asynchrone
|
||||
- `max_retries` (int, facultatif) : Nombre maximum de tentatives (par défaut : 3)
|
||||
- `initial_delay` (float, facultatif) : Délai initial en secondes (par défaut : 1.0)
|
||||
- `max_delay` (float, facultatif) : Délai maximum en secondes (par défaut : 30.0)
|
||||
- `backoff_multiplier` (float, facultatif) : Multiplicateur de backoff (par défaut : 2.0)
|
||||
|
||||
**Retourne :** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
La logique de nouvelle tentative utilise un backoff exponentiel (1s → 2s → 4s → 8s...) avec une variation aléatoire de ±25% pour éviter l'effet de horde. Si l'API fournit un en-tête `retry-after`, celui-ci sera utilisé à la place.
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
Obtenir les informations actuelles sur les limites de débit à partir de la dernière réponse de l'API.
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**Retourne :** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
Obtenir les limites d'utilisation actuelles et les informations de quota pour votre compte.
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**Retourne :** `UsageLimits`
|
||||
|
||||
**Structure de la réponse :**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
Mettre à jour la clé API.
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
Mettre à jour l'URL de base.
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
Fermer la session HTTP sous-jacente.
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## Classes de données
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**Codes d'erreur courants :**
|
||||
- `UNAUTHORIZED` : Clé API invalide
|
||||
- `TIMEOUT` : Délai d'attente de la requête dépassé
|
||||
- `RATE_LIMIT_EXCEEDED` : Limite de débit dépassée
|
||||
- `USAGE_LIMIT_EXCEEDED` : Limite d'utilisation dépassée
|
||||
- `EXECUTION_ERROR` : Échec de l'exécution du workflow
|
||||
|
||||
## Exemples
|
||||
|
||||
### Exécution basique d'un workflow
|
||||
|
||||
<Steps>
|
||||
<Step title="Initialiser le client">
|
||||
Configurez le SimStudioClient avec votre clé API.
|
||||
</Step>
|
||||
<Step title="Valider le workflow">
|
||||
Vérifiez si le workflow est déployé et prêt pour l'exécution.
|
||||
</Step>
|
||||
<Step title="Exécuter le workflow">
|
||||
Lancez le workflow avec vos données d'entrée.
|
||||
</Step>
|
||||
<Step title="Gérer le résultat">
|
||||
Traitez le résultat de l'exécution et gérez les éventuelles erreurs.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### Gestion des erreurs
|
||||
|
||||
Gérez différents types d'erreurs qui peuvent survenir pendant l'exécution du workflow :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### Utilisation du gestionnaire de contexte
|
||||
|
||||
Utilisez le client comme gestionnaire de contexte pour gérer automatiquement le nettoyage des ressources :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### Exécution de workflows par lots
|
||||
|
||||
Exécutez plusieurs workflows efficacement :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### Exécution asynchrone de workflow
|
||||
|
||||
Exécutez des workflows de manière asynchrone pour les tâches de longue durée :
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### Limitation de débit et nouvelle tentative
|
||||
|
||||
Gérez les limites de débit automatiquement avec un backoff exponentiel :
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### Surveillance de l'utilisation
|
||||
|
||||
Surveillez l'utilisation et les limites de votre compte :
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### Exécution de workflow en streaming
|
||||
|
||||
Exécutez des workflows avec des réponses en streaming en temps réel :
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
La réponse en streaming suit le format Server-Sent Events (SSE) :
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Exemple de streaming avec Flask :**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### Configuration de l'environnement
|
||||
|
||||
Configurez le client en utilisant des variables d'environnement :
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
    api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab value="Production">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Obtention de votre clé API
|
||||
|
||||
<Steps>
|
||||
<Step title="Connectez-vous à Sim">
|
||||
Accédez à [Sim](https://sim.ai) et connectez-vous à votre compte.
|
||||
</Step>
|
||||
<Step title="Ouvrez votre workflow">
|
||||
Accédez au workflow que vous souhaitez exécuter par programmation.
|
||||
</Step>
|
||||
<Step title="Déployez votre workflow">
|
||||
Cliquez sur "Déployer" pour déployer votre workflow s'il n'a pas encore été déployé.
|
||||
</Step>
|
||||
<Step title="Créez ou sélectionnez une clé API">
|
||||
Pendant le processus de déploiement, sélectionnez ou créez une clé API.
|
||||
</Step>
|
||||
<Step title="Copiez la clé API">
|
||||
Copiez la clé API pour l'utiliser dans votre application Python.
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## Prérequis
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## Licence
|
||||
|
||||
Apache-2.0
|
||||
1052
apps/docs/content/docs/fr/api-reference/typescript.mdx
Normal file
1052
apps/docs/content/docs/fr/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
24
apps/docs/content/docs/fr/meta.json
Normal file
24
apps/docs/content/docs/fr/meta.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"title": "Sim Documentation",
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
],
|
||||
"defaultOpen": false
|
||||
}
|
||||
94
apps/docs/content/docs/ja/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/ja/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim dashboard and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
import os
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/ja/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/ja/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar, or use the dashboard to manage deployments.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/ja/api-reference/meta.json
Normal file
16
apps/docs/content/docs/ja/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
766
apps/docs/content/docs/ja/api-reference/python.mdx
Normal file
766
apps/docs/content/docs/ja/api-reference/python.mdx
Normal file
@@ -0,0 +1,766 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
Simの公式Python SDKを使用すると、Pythonアプリケーションからプログラムでワークフローを実行できます。
|
||||
|
||||
<Callout type="info">
|
||||
Python SDKはPython 3.8以上をサポートし、非同期実行、指数バックオフによる自動レート制限、使用状況追跡機能を提供します。
|
||||
</Callout>
|
||||
|
||||
## インストール
|
||||
|
||||
pipを使用してSDKをインストールします:
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## クイックスタート
|
||||
|
||||
以下は、始めるための簡単な例です:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## APIリファレンス
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### コンストラクタ
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `api_key` (str): SimのAPIキー
|
||||
- `base_url` (str, オプション): Sim APIのベースURL
|
||||
|
||||
#### メソッド
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
オプションの入力データでワークフローを実行します。
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): 実行するワークフローのID
|
||||
- `input_data` (dict, オプション): ワークフローに渡す入力データ
|
||||
- `timeout` (float, オプション): タイムアウト(秒)(デフォルト: 30.0)
|
||||
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする(デフォルト: False)
|
||||
- `selected_outputs` (list[str], オプション): `blockName.attribute`形式でストリーミングするブロック出力(例: `["agent1.content"]`)
|
||||
- `async_execution` (bool, オプション): 非同期実行(デフォルト: False)
|
||||
|
||||
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
`async_execution=True`の場合、ポーリング用のタスクIDをすぐに返します。それ以外の場合は、完了を待ちます。
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
ワークフローのステータス(デプロイメントステータスなど)を取得します。
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): ワークフローのID
|
||||
|
||||
**戻り値:** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
ワークフローが実行準備ができているかを検証します。
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): ワークフローのID
|
||||
|
||||
**戻り値:** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
非同期ジョブ実行のステータスを取得します。
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `task_id` (str): 非同期実行から返されたタスクID
|
||||
|
||||
**戻り値:** `Dict[str, Any]`
|
||||
|
||||
**レスポンスフィールド:**
|
||||
- `success` (bool): リクエストが成功したかどうか
|
||||
- `taskId` (str): タスクID
|
||||
- `status` (str): 次のいずれか: `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): `startedAt`, `completedAt`, `duration`を含む
|
||||
- `output` (any, オプション): ワークフロー出力(完了時)
|
||||
- `error` (any, オプション): エラー詳細(失敗時)
|
||||
- `estimatedDuration` (int, オプション): 推定所要時間(ミリ秒)(処理中/キュー時)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
指数バックオフを使用してレート制限エラーで自動的に再試行するワークフロー実行。
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**パラメータ:**
|
||||
- `workflow_id` (str): 実行するワークフローのID
|
||||
- `input_data` (dict, オプション): ワークフローに渡す入力データ
|
||||
- `timeout` (float, オプション): タイムアウト(秒)
|
||||
- `stream` (bool, オプション): ストリーミングレスポンスを有効にする
|
||||
- `selected_outputs` (list, オプション): ストリーミングするブロック出力
|
||||
- `async_execution` (bool, オプション): 非同期実行
|
||||
- `max_retries` (int, オプション): 最大再試行回数(デフォルト: 3)
|
||||
- `initial_delay` (float, オプション): 初期遅延(秒)(デフォルト: 1.0)
|
||||
- `max_delay` (float, オプション): 最大遅延(秒)(デフォルト: 30.0)
|
||||
- `backoff_multiplier` (float, オプション): バックオフ乗数(デフォルト: 2.0)
|
||||
|
||||
**戻り値:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
リトライロジックは、サンダリングハード問題を防ぐために±25%のジッターを伴う指数バックオフ(1秒→2秒→4秒→8秒...)を使用します。APIが `retry-after` ヘッダーを提供する場合、代わりにそれが使用されます。
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
最後のAPIレスポンスから現在のレート制限情報を取得します。
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**戻り値:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
アカウントの現在の使用制限とクォータ情報を取得します。
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**戻り値:** `UsageLimits`
|
||||
|
||||
**レスポンス構造:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
APIキーを更新します。
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
ベースURLを更新します。
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
基盤となるHTTPセッションを閉じます。
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## データクラス
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**一般的なエラーコード:**
|
||||
- `UNAUTHORIZED`: 無効なAPIキー
|
||||
- `TIMEOUT`: リクエストがタイムアウトしました
|
||||
- `RATE_LIMIT_EXCEEDED`: レート制限を超えました
|
||||
- `USAGE_LIMIT_EXCEEDED`: 使用制限を超えました
|
||||
- `EXECUTION_ERROR`: ワークフローの実行に失敗しました
|
||||
|
||||
## 例
|
||||
|
||||
### 基本的なワークフロー実行
|
||||
|
||||
<Steps>
|
||||
<Step title="クライアントの初期化">
|
||||
APIキーを使用してSimStudioClientをセットアップします。
|
||||
</Step>
|
||||
<Step title="ワークフローの検証">
|
||||
ワークフローがデプロイされ、実行準備ができているか確認します。
|
||||
</Step>
|
||||
<Step title="ワークフローの実行">
|
||||
入力データでワークフローを実行します。
|
||||
</Step>
|
||||
<Step title="結果の処理">
|
||||
実行結果を処理し、エラーがあれば対処します。
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### エラー処理
|
||||
|
||||
ワークフロー実行中に発生する可能性のある様々なタイプのエラーを処理します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### コンテキストマネージャーの使用
|
||||
|
||||
リソースのクリーンアップを自動的に処理するためにクライアントをコンテキストマネージャーとして使用します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### バッチワークフロー実行
|
||||
|
||||
複数のワークフローを効率的に実行します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### 非同期ワークフロー実行
|
||||
|
||||
長時間実行されるタスクのためにワークフローを非同期で実行します:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### レート制限とリトライ
|
||||
|
||||
指数バックオフを使用して自動的にレート制限を処理します:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### 使用状況モニタリング
|
||||
|
||||
アカウントの使用状況と制限をモニタリングします:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### ワークフローの実行ストリーミング
|
||||
|
||||
リアルタイムのストリーミングレスポンスでワークフローを実行します:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
ストリーミングレスポンスはServer-Sent Events(SSE)形式に従います:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flaskストリーミングの例:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### 環境設定
|
||||
|
||||
環境変数を使用してクライアントを設定します:
|
||||
|
||||
<Tabs items={['Development', 'Production']}>
|
||||
<Tab value="Development">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
    api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab value="Production">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## APIキーの取得方法
|
||||
|
||||
<Steps>
|
||||
<Step title="Simにログイン">
|
||||
[Sim](https://sim.ai)に移動してアカウントにログインします。
|
||||
</Step>
|
||||
<Step title="ワークフローを開く">
|
||||
プログラムで実行したいワークフローに移動します。
|
||||
</Step>
|
||||
<Step title="ワークフローをデプロイする">
|
||||
まだデプロイされていない場合は、「デプロイ」をクリックしてワークフローをデプロイします。
|
||||
</Step>
|
||||
<Step title="APIキーを作成または選択する">
|
||||
デプロイプロセス中に、APIキーを選択または作成します。
|
||||
</Step>
|
||||
<Step title="APIキーをコピーする">
|
||||
Pythonアプリケーションで使用するAPIキーをコピーします。
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 要件
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## ライセンス
|
||||
|
||||
Apache-2.0
|
||||
1052
apps/docs/content/docs/ja/api-reference/typescript.mdx
Normal file
1052
apps/docs/content/docs/ja/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
24
apps/docs/content/docs/ja/meta.json
Normal file
24
apps/docs/content/docs/ja/meta.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"title": "Sim Documentation",
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
],
|
||||
"defaultOpen": false
|
||||
}
|
||||
94
apps/docs/content/docs/zh/api-reference/authentication.mdx
Normal file
94
apps/docs/content/docs/zh/api-reference/authentication.mdx
Normal file
@@ -0,0 +1,94 @@
|
||||
---
|
||||
title: Authentication
|
||||
description: API key types, generation, and how to authenticate requests
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
To access the Sim API, you need an API key. Sim supports two types of API keys — **personal keys** and **workspace keys** — each with different billing and access behaviors.
|
||||
|
||||
## Key Types
|
||||
|
||||
| | **Personal Keys** | **Workspace Keys** |
|
||||
| --- | --- | --- |
|
||||
| **Billed to** | Your individual account | Workspace owner |
|
||||
| **Scope** | Across workspaces you have access to | Shared across the workspace |
|
||||
| **Managed by** | Each user individually | Workspace admins |
|
||||
| **Permissions** | Must be enabled at workspace level | Require admin permissions |
|
||||
|
||||
<Callout type="info">
|
||||
Workspace admins can disable personal API key usage for their workspace. If disabled, only workspace keys can be used.
|
||||
</Callout>
|
||||
|
||||
## Generating API Keys
|
||||
|
||||
To generate a key, open the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**.
|
||||
|
||||
<Callout type="warn">
|
||||
API keys are only shown once when generated. Store your key securely — you will not be able to view it again.
|
||||
</Callout>
|
||||
|
||||
## Using API Keys
|
||||
|
||||
Pass your API key in the `X-API-Key` header with every request:
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
'https://www.sim.ai/api/workflows/{workflowId}/execute',
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
|
||||
response = requests.post(
|
||||
"https://www.sim.ai/api/workflows/{workflowId}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## Where Keys Are Used
|
||||
|
||||
API keys authenticate access to:
|
||||
|
||||
- **Workflow execution** — run deployed workflows via the API
|
||||
- **Logs API** — query workflow execution logs and metrics
|
||||
- **MCP servers** — authenticate connections to deployed MCP servers
|
||||
- **SDKs** — the [Python](/api-reference/python) and [TypeScript](/api-reference/typescript) SDKs use API keys for all operations
|
||||
|
||||
## Security
|
||||
|
||||
- Keys use the `sk-sim-` prefix and are encrypted at rest
|
||||
- Keys can be revoked at any time from the dashboard
|
||||
- Use environment variables to store keys — never hardcode them in source code
|
||||
- For browser-based applications, use a backend proxy to avoid exposing keys to the client
|
||||
|
||||
<Callout type="warn">
|
||||
Never expose your API key in client-side code. Use a server-side proxy to make authenticated requests on behalf of your frontend.
|
||||
</Callout>
|
||||
210
apps/docs/content/docs/zh/api-reference/getting-started.mdx
Normal file
210
apps/docs/content/docs/zh/api-reference/getting-started.mdx
Normal file
@@ -0,0 +1,210 @@
|
||||
---
|
||||
title: Getting Started
|
||||
description: Base URL, first API call, response format, error handling, and pagination
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
|
||||
## Base URL
|
||||
|
||||
All API requests are made to:
|
||||
|
||||
```
|
||||
https://www.sim.ai
|
||||
```
|
||||
|
||||
## Quick Start
|
||||
|
||||
<Steps>
|
||||
|
||||
<Step>
|
||||
### Get your API key
|
||||
|
||||
Go to the Sim platform and navigate to **Settings**, then go to **Sim Keys** and click **Create**. See [Authentication](/api-reference/authentication) for details on key types.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Find your workflow ID
|
||||
|
||||
Open a workflow in the Sim editor. The workflow ID is in the URL:
|
||||
|
||||
```
|
||||
https://www.sim.ai/workspace/{workspaceId}/w/{workflowId}
|
||||
```
|
||||
|
||||
You can also use the [List Workflows](/api-reference/workflows/listWorkflows) endpoint to get all workflow IDs in a workspace.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Deploy your workflow
|
||||
|
||||
A workflow must be deployed before it can be executed via the API. Click the **Deploy** button in the editor toolbar, or use the dashboard to manage deployments.
|
||||
</Step>
|
||||
|
||||
<Step>
|
||||
### Make your first request
|
||||
|
||||
<Tabs items={['curl', 'TypeScript', 'Python']}>
|
||||
<Tab value="curl">
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}}'
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="TypeScript">
|
||||
```typescript
|
||||
const response = await fetch(
|
||||
`https://www.sim.ai/api/workflows/${workflowId}/execute`,
|
||||
{
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': process.env.SIM_API_KEY!,
|
||||
},
|
||||
body: JSON.stringify({ inputs: {} }),
|
||||
}
|
||||
)
|
||||
|
||||
const data = await response.json()
|
||||
console.log(data.output)
|
||||
```
|
||||
</Tab>
|
||||
<Tab value="Python">
|
||||
```python
|
||||
import requests
|
||||
import os
|
||||
|
||||
response = requests.post(
|
||||
f"https://www.sim.ai/api/workflows/{workflow_id}/execute",
|
||||
headers={
|
||||
"Content-Type": "application/json",
|
||||
"X-API-Key": os.environ["SIM_API_KEY"],
|
||||
},
|
||||
json={"inputs": {}},
|
||||
)
|
||||
|
||||
data = response.json()
|
||||
print(data["output"])
|
||||
```
|
||||
</Tab>
|
||||
</Tabs>
|
||||
</Step>
|
||||
|
||||
</Steps>
|
||||
|
||||
## Sync vs Async Execution
|
||||
|
||||
By default, workflow executions are **synchronous** — the API blocks until the workflow completes and returns the result directly.
|
||||
|
||||
For long-running workflows, use **asynchronous execution** by passing `async: true`:
|
||||
|
||||
```bash
|
||||
curl -X POST https://www.sim.ai/api/workflows/{workflowId}/execute \
|
||||
-H "Content-Type: application/json" \
|
||||
-H "X-API-Key: YOUR_API_KEY" \
|
||||
-d '{"inputs": {}, "async": true}'
|
||||
```
|
||||
|
||||
This returns immediately with a `taskId`:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"taskId": "job_abc123",
|
||||
"status": "queued"
|
||||
}
|
||||
```
|
||||
|
||||
Poll the [Get Job Status](/api-reference/workflows/getJobStatus) endpoint until the status is `completed` or `failed`:
|
||||
|
||||
```bash
|
||||
curl https://www.sim.ai/api/jobs/{taskId} \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
<Callout type="info">
|
||||
Job status transitions follow: `queued` → `processing` → `completed` or `failed`. The `output` field is only present when status is `completed`.
|
||||
</Callout>
|
||||
|
||||
## Response Format
|
||||
|
||||
Successful responses include an `output` object with your workflow results and a `limits` object with your current rate limit and usage status:
|
||||
|
||||
```json
|
||||
{
|
||||
"success": true,
|
||||
"output": {
|
||||
"result": "Hello, world!"
|
||||
},
|
||||
"limits": {
|
||||
"workflowExecutionRateLimit": {
|
||||
"sync": {
|
||||
"requestsPerMinute": 60,
|
||||
"maxBurst": 10,
|
||||
"remaining": 59,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
},
|
||||
"async": {
|
||||
"requestsPerMinute": 30,
|
||||
"maxBurst": 5,
|
||||
"remaining": 30,
|
||||
"resetAt": "2025-01-01T00:01:00Z"
|
||||
}
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": 1.25,
|
||||
"limit": 50.00,
|
||||
"plan": "pro",
|
||||
"isExceeded": false
|
||||
}
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
The API uses standard HTTP status codes. Error responses include a human-readable `error` message:
|
||||
|
||||
```json
|
||||
{
|
||||
"error": "Workflow not found"
|
||||
}
|
||||
```
|
||||
|
||||
| Status | Meaning | What to do |
|
||||
| --- | --- | --- |
|
||||
| `400` | Invalid request parameters | Check the `details` array for specific field errors |
|
||||
| `401` | Missing or invalid API key | Verify your `X-API-Key` header |
|
||||
| `403` | Access denied | Check you have permission for this resource |
|
||||
| `404` | Resource not found | Verify the ID exists and belongs to your workspace |
|
||||
| `429` | Rate limit exceeded | Wait for the duration in the `Retry-After` header |
|
||||
|
||||
<Callout type="info">
|
||||
Use the [Get Usage Limits](/api-reference/usage/getUsageLimits) endpoint to check your current rate limit status and billing usage at any time.
|
||||
</Callout>
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Rate limits depend on your subscription plan and apply separately to synchronous and asynchronous executions. Every execution response includes a `limits` object showing your current rate limit status.
|
||||
|
||||
When rate limited, the API returns a `429` response with a `Retry-After` header indicating how many seconds to wait before retrying.
|
||||
|
||||
## Pagination
|
||||
|
||||
List endpoints (workflows, logs, audit logs) use **cursor-based pagination**:
|
||||
|
||||
```bash
|
||||
# First page
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
|
||||
# Next page — use the nextCursor from the previous response
|
||||
curl "https://www.sim.ai/api/v1/logs?limit=20&cursor=abc123" \
|
||||
-H "X-API-Key: YOUR_API_KEY"
|
||||
```
|
||||
|
||||
The response includes a `nextCursor` field. When `nextCursor` is absent or `null`, you have reached the last page.
|
||||
16
apps/docs/content/docs/zh/api-reference/meta.json
Normal file
16
apps/docs/content/docs/zh/api-reference/meta.json
Normal file
@@ -0,0 +1,16 @@
|
||||
{
|
||||
"title": "API Reference",
|
||||
"root": true,
|
||||
"pages": [
|
||||
"getting-started",
|
||||
"authentication",
|
||||
"---SDKs---",
|
||||
"python",
|
||||
"typescript",
|
||||
"---Endpoints---",
|
||||
"(generated)/workflows",
|
||||
"(generated)/logs",
|
||||
"(generated)/usage",
|
||||
"(generated)/audit-logs"
|
||||
]
|
||||
}
|
||||
766
apps/docs/content/docs/zh/api-reference/python.mdx
Normal file
766
apps/docs/content/docs/zh/api-reference/python.mdx
Normal file
@@ -0,0 +1,766 @@
|
||||
---
|
||||
title: Python
|
||||
---
|
||||
|
||||
import { Callout } from 'fumadocs-ui/components/callout'
|
||||
import { Card, Cards } from 'fumadocs-ui/components/card'
|
||||
import { Step, Steps } from 'fumadocs-ui/components/steps'
|
||||
import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
|
||||
|
||||
官方的 Python SDK 允许您通过 Python 应用程序以编程方式执行工作流。
|
||||
|
||||
<Callout type="info">
|
||||
Python SDK 支持 Python 3.8+,具备异步执行支持、自动速率限制(带指数退避)以及使用情况跟踪功能。
|
||||
</Callout>
|
||||
|
||||
## 安装
|
||||
|
||||
使用 pip 安装 SDK:
|
||||
|
||||
```bash
|
||||
pip install simstudio-sdk
|
||||
```
|
||||
|
||||
## 快速开始
|
||||
|
||||
以下是一个简单的示例,帮助您快速入门:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Initialize the client
|
||||
client = SimStudioClient(
|
||||
api_key="your-api-key-here",
|
||||
base_url="https://sim.ai" # optional, defaults to https://sim.ai
|
||||
)
|
||||
|
||||
# Execute a workflow
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Workflow executed successfully:", result)
|
||||
except Exception as error:
|
||||
print("Workflow execution failed:", error)
|
||||
```
|
||||
|
||||
## API 参考
|
||||
|
||||
### SimStudioClient
|
||||
|
||||
#### 构造函数
|
||||
|
||||
```python
|
||||
SimStudioClient(api_key: str, base_url: str = "https://sim.ai")
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `api_key` (str): 您的 Sim API 密钥
|
||||
- `base_url` (str, 可选): Sim API 的基础 URL
|
||||
|
||||
#### 方法
|
||||
|
||||
##### execute_workflow()
|
||||
|
||||
执行带有可选输入数据的工作流。
|
||||
|
||||
```python
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello, world!"},
|
||||
timeout=30.0 # 30 seconds
|
||||
)
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str): 要执行的工作流 ID
|
||||
- `input_data` (dict, optional): 传递给工作流的输入数据
|
||||
- `timeout` (float, optional): 超时时间(以秒为单位,默认值:30.0)
|
||||
- `stream` (bool, optional): 启用流式响应(默认值:False)
|
||||
- `selected_outputs` (list[str], optional): 要流式传输的块输出,格式为 `blockName.attribute`(例如 `["agent1.content"]`)
|
||||
- `async_execution` (bool, optional): 异步执行(默认值:False)
|
||||
|
||||
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
当 `async_execution=True` 时,立即返回任务 ID 以供轮询。否则,等待完成。
|
||||
|
||||
##### get_workflow_status()
|
||||
|
||||
获取工作流的状态(部署状态等)。
|
||||
|
||||
```python
|
||||
status = client.get_workflow_status("workflow-id")
|
||||
print("Is deployed:", status.is_deployed)
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str): 工作流的 ID
|
||||
|
||||
**返回值:** `WorkflowStatus`
|
||||
|
||||
##### validate_workflow()
|
||||
|
||||
验证工作流是否已准备好执行。
|
||||
|
||||
```python
|
||||
is_ready = client.validate_workflow("workflow-id")
|
||||
if is_ready:
|
||||
# Workflow is deployed and ready
|
||||
pass
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str): 工作流的 ID
|
||||
|
||||
**返回值:** `bool`
|
||||
|
||||
##### get_job_status()
|
||||
|
||||
获取异步任务执行的状态。
|
||||
|
||||
```python
|
||||
status = client.get_job_status("task-id-from-async-execution")
|
||||
print("Status:", status["status"]) # 'queued', 'processing', 'completed', 'failed'
|
||||
if status["status"] == "completed":
|
||||
print("Output:", status["output"])
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `task_id` (str): 异步执行返回的任务 ID
|
||||
|
||||
**返回值:** `Dict[str, Any]`
|
||||
|
||||
**响应字段:**
|
||||
- `success` (bool): 请求是否成功
|
||||
- `taskId` (str): 任务 ID
|
||||
- `status` (str): 可能的值包括 `'queued'`, `'processing'`, `'completed'`, `'failed'`, `'cancelled'`
|
||||
- `metadata` (dict): 包含 `startedAt`, `completedAt` 和 `duration`
|
||||
- `output` (any, optional): 工作流输出(完成时)
|
||||
- `error` (any, optional): 错误详情(失败时)
|
||||
- `estimatedDuration` (int, optional): 估计持续时间(以毫秒为单位,处理中/排队时)
|
||||
|
||||
##### execute_with_retry()
|
||||
|
||||
使用指数退避在速率限制错误上自动重试执行工作流。
|
||||
|
||||
```python
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Hello"},
|
||||
timeout=30.0,
|
||||
max_retries=3, # Maximum number of retries
|
||||
initial_delay=1.0, # Initial delay in seconds
|
||||
max_delay=30.0, # Maximum delay in seconds
|
||||
backoff_multiplier=2.0 # Exponential backoff multiplier
|
||||
)
|
||||
```
|
||||
|
||||
**参数:**
|
||||
- `workflow_id` (str): 要执行的工作流 ID
|
||||
- `input_data` (dict, optional): 传递给工作流的输入数据
|
||||
- `timeout` (float, optional): 超时时间(以秒为单位)
|
||||
- `stream` (bool, optional): 启用流式响应
|
||||
- `selected_outputs` (list, optional): 要流式传输的块输出
|
||||
- `async_execution` (bool, optional): 异步执行
|
||||
- `max_retries` (int, optional): 最大重试次数(默认值:3)
|
||||
- `initial_delay` (float, optional): 初始延迟时间(以秒为单位,默认值:1.0)
|
||||
- `max_delay` (float, optional): 最大延迟时间(以秒为单位,默认值:30.0)
|
||||
- `backoff_multiplier` (float, optional): 退避倍数(默认值:2.0)
|
||||
|
||||
**返回值:** `WorkflowExecutionResult | AsyncExecutionResult`
|
||||
|
||||
重试逻辑使用指数退避(1 秒 → 2 秒 → 4 秒 → 8 秒...),并带有 ±25% 的抖动以防止惊群效应。如果 API 提供了 `retry-after` 标头,则会使用该标头。
|
||||
|
||||
##### get_rate_limit_info()
|
||||
|
||||
从上一次 API 响应中获取当前的速率限制信息。
|
||||
|
||||
```python
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
print("Limit:", rate_limit_info.limit)
|
||||
print("Remaining:", rate_limit_info.remaining)
|
||||
print("Reset:", datetime.fromtimestamp(rate_limit_info.reset))
|
||||
```
|
||||
|
||||
**返回值:** `RateLimitInfo | None`
|
||||
|
||||
##### get_usage_limits()
|
||||
|
||||
获取您的账户当前的使用限制和配额信息。
|
||||
|
||||
```python
|
||||
limits = client.get_usage_limits()
|
||||
print("Sync requests remaining:", limits.rate_limit["sync"]["remaining"])
|
||||
print("Async requests remaining:", limits.rate_limit["async"]["remaining"])
|
||||
print("Current period cost:", limits.usage["currentPeriodCost"])
|
||||
print("Plan:", limits.usage["plan"])
|
||||
```
|
||||
|
||||
**返回值:** `UsageLimits`
|
||||
|
||||
**响应结构:**
|
||||
|
||||
```python
|
||||
{
|
||||
"success": bool,
|
||||
"rateLimit": {
|
||||
"sync": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"async": {
|
||||
"isLimited": bool,
|
||||
"limit": int,
|
||||
"remaining": int,
|
||||
"resetAt": str
|
||||
},
|
||||
"authType": str # 'api' or 'manual'
|
||||
},
|
||||
"usage": {
|
||||
"currentPeriodCost": float,
|
||||
"limit": float,
|
||||
"plan": str # e.g., 'free', 'pro'
|
||||
}
|
||||
}
|
||||
```
|
||||
|
||||
##### set_api_key()
|
||||
|
||||
更新 API 密钥。
|
||||
|
||||
```python
|
||||
client.set_api_key("new-api-key")
|
||||
```
|
||||
|
||||
##### set_base_url()
|
||||
|
||||
更新基础 URL。
|
||||
|
||||
```python
|
||||
client.set_base_url("https://my-custom-domain.com")
|
||||
```
|
||||
|
||||
##### close()
|
||||
|
||||
关闭底层 HTTP 会话。
|
||||
|
||||
```python
|
||||
client.close()
|
||||
```
|
||||
|
||||
## 数据类
|
||||
|
||||
### WorkflowExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowExecutionResult:
|
||||
success: bool
|
||||
output: Optional[Any] = None
|
||||
error: Optional[str] = None
|
||||
logs: Optional[List[Any]] = None
|
||||
metadata: Optional[Dict[str, Any]] = None
|
||||
trace_spans: Optional[List[Any]] = None
|
||||
total_duration: Optional[float] = None
|
||||
```
|
||||
|
||||
### AsyncExecutionResult
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class AsyncExecutionResult:
|
||||
success: bool
|
||||
task_id: str
|
||||
status: str # 'queued'
|
||||
created_at: str
|
||||
links: Dict[str, str] # e.g., {"status": "/api/jobs/{taskId}"}
|
||||
```
|
||||
|
||||
### WorkflowStatus
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class WorkflowStatus:
|
||||
is_deployed: bool
|
||||
deployed_at: Optional[str] = None
|
||||
needs_redeployment: bool = False
|
||||
```
|
||||
|
||||
### RateLimitInfo
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class RateLimitInfo:
|
||||
limit: int
|
||||
remaining: int
|
||||
reset: int
|
||||
retry_after: Optional[int] = None
|
||||
```
|
||||
|
||||
### UsageLimits
|
||||
|
||||
```python
|
||||
@dataclass
|
||||
class UsageLimits:
|
||||
success: bool
|
||||
rate_limit: Dict[str, Any]
|
||||
usage: Dict[str, Any]
|
||||
```
|
||||
|
||||
### SimStudioError
|
||||
|
||||
```python
|
||||
class SimStudioError(Exception):
|
||||
def __init__(self, message: str, code: Optional[str] = None, status: Optional[int] = None):
|
||||
super().__init__(message)
|
||||
self.code = code
|
||||
self.status = status
|
||||
```
|
||||
|
||||
**常见错误代码:**
|
||||
- `UNAUTHORIZED`: 无效的 API 密钥
|
||||
- `TIMEOUT`: 请求超时
|
||||
- `RATE_LIMIT_EXCEEDED`: 超出速率限制
|
||||
- `USAGE_LIMIT_EXCEEDED`: 超出使用限制
|
||||
- `EXECUTION_ERROR`: 工作流执行失败
|
||||
|
||||
## 示例
|
||||
|
||||
### 基本工作流执行
|
||||
|
||||
<Steps>
|
||||
<Step title="初始化客户端">
|
||||
使用您的 API 密钥设置 SimStudioClient。
|
||||
</Step>
|
||||
<Step title="验证工作流">
|
||||
检查工作流是否已部署并准备好执行。
|
||||
</Step>
|
||||
<Step title="执行工作流">
|
||||
使用您的输入数据运行工作流。
|
||||
</Step>
|
||||
<Step title="处理结果">
|
||||
处理执行结果并处理任何错误。
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def run_workflow():
|
||||
try:
|
||||
# Check if workflow is ready
|
||||
is_ready = client.validate_workflow("my-workflow-id")
|
||||
if not is_ready:
|
||||
raise Exception("Workflow is not deployed or ready")
|
||||
|
||||
# Execute the workflow
|
||||
result = client.execute_workflow(
|
||||
"my-workflow-id",
|
||||
input_data={
|
||||
"message": "Process this data",
|
||||
"user_id": "12345"
|
||||
}
|
||||
)
|
||||
|
||||
if result.success:
|
||||
print("Output:", result.output)
|
||||
print("Duration:", result.metadata.get("duration") if result.metadata else None)
|
||||
else:
|
||||
print("Workflow failed:", result.error)
|
||||
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
run_workflow()
|
||||
```
|
||||
|
||||
### 错误处理
|
||||
|
||||
处理工作流执行过程中可能发生的不同类型的错误:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_error_handling():
|
||||
try:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
return result
|
||||
except SimStudioError as error:
|
||||
if error.code == "UNAUTHORIZED":
|
||||
print("Invalid API key")
|
||||
elif error.code == "TIMEOUT":
|
||||
print("Workflow execution timed out")
|
||||
elif error.code == "USAGE_LIMIT_EXCEEDED":
|
||||
print("Usage limit exceeded")
|
||||
elif error.code == "INVALID_JSON":
|
||||
print("Invalid JSON in request body")
|
||||
else:
|
||||
print(f"Workflow error: {error}")
|
||||
raise
|
||||
except Exception as error:
|
||||
print(f"Unexpected error: {error}")
|
||||
raise
|
||||
```
|
||||
|
||||
### 上下文管理器的使用
|
||||
|
||||
将客户端用作上下文管理器以自动处理资源清理:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
# Using context manager to automatically close the session
|
||||
with SimStudioClient(api_key=os.getenv("SIM_API_KEY")) as client:
|
||||
result = client.execute_workflow("workflow-id")
|
||||
print("Result:", result)
|
||||
# Session is automatically closed here
|
||||
```
|
||||
|
||||
### 批量工作流执行
|
||||
|
||||
高效地执行多个工作流:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_workflows_batch(workflow_data_pairs):
|
||||
"""Execute multiple workflows with different input data."""
|
||||
results = []
|
||||
|
||||
for workflow_id, input_data in workflow_data_pairs:
|
||||
try:
|
||||
# Validate workflow before execution
|
||||
if not client.validate_workflow(workflow_id):
|
||||
print(f"Skipping {workflow_id}: not deployed")
|
||||
continue
|
||||
|
||||
result = client.execute_workflow(workflow_id, input_data)
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": result.success,
|
||||
"output": result.output,
|
||||
"error": result.error
|
||||
})
|
||||
|
||||
except Exception as error:
|
||||
results.append({
|
||||
"workflow_id": workflow_id,
|
||||
"success": False,
|
||||
"error": str(error)
|
||||
})
|
||||
|
||||
return results
|
||||
|
||||
# Example usage
|
||||
workflows = [
|
||||
("workflow-1", {"type": "analysis", "data": "sample1"}),
|
||||
("workflow-2", {"type": "processing", "data": "sample2"}),
|
||||
]
|
||||
|
||||
results = execute_workflows_batch(workflows)
|
||||
for result in results:
|
||||
print(f"Workflow {result['workflow_id']}: {'Success' if result['success'] else 'Failed'}")
|
||||
```
|
||||
|
||||
### 异步工作流执行
|
||||
|
||||
为长时间运行的任务异步执行工作流:
|
||||
|
||||
```python
|
||||
import os
|
||||
import time
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_async():
|
||||
try:
|
||||
# Start async execution
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"data": "large dataset"},
|
||||
async_execution=True # Execute asynchronously
|
||||
)
|
||||
|
||||
# Check if result is an async execution
|
||||
if hasattr(result, 'task_id'):
|
||||
print(f"Task ID: {result.task_id}")
|
||||
print(f"Status endpoint: {result.links['status']}")
|
||||
|
||||
# Poll for completion
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
while status["status"] in ["queued", "processing"]:
|
||||
print(f"Current status: {status['status']}")
|
||||
time.sleep(2) # Wait 2 seconds
|
||||
status = client.get_job_status(result.task_id)
|
||||
|
||||
if status["status"] == "completed":
|
||||
print("Workflow completed!")
|
||||
print(f"Output: {status['output']}")
|
||||
print(f"Duration: {status['metadata']['duration']}")
|
||||
else:
|
||||
print(f"Workflow failed: {status['error']}")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error: {error}")
|
||||
|
||||
execute_async()
|
||||
```
|
||||
|
||||
### 速率限制与重试
|
||||
|
||||
通过指数退避自动处理速率限制:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient, SimStudioError
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_retry_handling():
|
||||
try:
|
||||
# Automatically retries on rate limit
|
||||
result = client.execute_with_retry(
|
||||
"workflow-id",
|
||||
input_data={"message": "Process this"},
|
||||
max_retries=5,
|
||||
initial_delay=1.0,
|
||||
max_delay=60.0,
|
||||
backoff_multiplier=2.0
|
||||
)
|
||||
|
||||
print(f"Success: {result}")
|
||||
except SimStudioError as error:
|
||||
if error.code == "RATE_LIMIT_EXCEEDED":
|
||||
print("Rate limit exceeded after all retries")
|
||||
|
||||
# Check rate limit info
|
||||
rate_limit_info = client.get_rate_limit_info()
|
||||
if rate_limit_info:
|
||||
from datetime import datetime
|
||||
reset_time = datetime.fromtimestamp(rate_limit_info.reset)
|
||||
print(f"Rate limit resets at: {reset_time}")
|
||||
|
||||
execute_with_retry_handling()
|
||||
```
|
||||
|
||||
### 使用监控
|
||||
|
||||
监控您的账户使用情况和限制:
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def check_usage():
|
||||
try:
|
||||
limits = client.get_usage_limits()
|
||||
|
||||
print("=== Rate Limits ===")
|
||||
print("Sync requests:")
|
||||
print(f" Limit: {limits.rate_limit['sync']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['sync']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['sync']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['sync']['isLimited']}")
|
||||
|
||||
print("\nAsync requests:")
|
||||
print(f" Limit: {limits.rate_limit['async']['limit']}")
|
||||
print(f" Remaining: {limits.rate_limit['async']['remaining']}")
|
||||
print(f" Resets at: {limits.rate_limit['async']['resetAt']}")
|
||||
print(f" Is limited: {limits.rate_limit['async']['isLimited']}")
|
||||
|
||||
print("\n=== Usage ===")
|
||||
print(f"Current period cost: ${limits.usage['currentPeriodCost']:.2f}")
|
||||
print(f"Limit: ${limits.usage['limit']:.2f}")
|
||||
print(f"Plan: {limits.usage['plan']}")
|
||||
|
||||
percent_used = (limits.usage['currentPeriodCost'] / limits.usage['limit']) * 100
|
||||
print(f"Usage: {percent_used:.1f}%")
|
||||
|
||||
if percent_used > 80:
|
||||
print("⚠️ Warning: You are approaching your usage limit!")
|
||||
|
||||
except Exception as error:
|
||||
print(f"Error checking usage: {error}")
|
||||
|
||||
check_usage()
|
||||
```
|
||||
|
||||
### 流式工作流执行
|
||||
|
||||
通过实时流式响应执行工作流:
|
||||
|
||||
```python
|
||||
from simstudio import SimStudioClient
|
||||
import os
|
||||
|
||||
client = SimStudioClient(api_key=os.getenv("SIM_API_KEY"))
|
||||
|
||||
def execute_with_streaming():
|
||||
"""Execute workflow with streaming enabled."""
|
||||
try:
|
||||
# Enable streaming for specific block outputs
|
||||
result = client.execute_workflow(
|
||||
"workflow-id",
|
||||
input_data={"message": "Count to five"},
|
||||
stream=True,
|
||||
selected_outputs=["agent1.content"] # Use blockName.attribute format
|
||||
)
|
||||
|
||||
print("Workflow result:", result)
|
||||
except Exception as error:
|
||||
print("Error:", error)
|
||||
|
||||
execute_with_streaming()
|
||||
```
|
||||
|
||||
流式响应遵循服务器发送事件 (SSE) 格式:
|
||||
|
||||
```
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":"One"}
|
||||
|
||||
data: {"blockId":"7b7735b9-19e5-4bd6-818b-46aae2596e9f","chunk":", two"}
|
||||
|
||||
data: {"event":"done","success":true,"output":{},"metadata":{"duration":610}}
|
||||
|
||||
data: [DONE]
|
||||
```
|
||||
|
||||
**Flask 流式示例:**
|
||||
|
||||
```python
|
||||
from flask import Flask, Response, stream_with_context
|
||||
import requests
|
||||
import json
|
||||
import os
|
||||
|
||||
app = Flask(__name__)
|
||||
|
||||
@app.route('/stream-workflow')
|
||||
def stream_workflow():
|
||||
"""Stream workflow execution to the client."""
|
||||
|
||||
def generate():
|
||||
response = requests.post(
|
||||
'https://sim.ai/api/workflows/WORKFLOW_ID/execute',
|
||||
headers={
|
||||
'Content-Type': 'application/json',
|
||||
'X-API-Key': os.getenv('SIM_API_KEY')
|
||||
},
|
||||
json={
|
||||
'message': 'Generate a story',
|
||||
'stream': True,
|
||||
'selectedOutputs': ['agent1.content']
|
||||
},
|
||||
stream=True
|
||||
)
|
||||
|
||||
for line in response.iter_lines():
|
||||
if line:
|
||||
decoded_line = line.decode('utf-8')
|
||||
if decoded_line.startswith('data: '):
|
||||
data = decoded_line[6:] # Remove 'data: ' prefix
|
||||
|
||||
if data == '[DONE]':
|
||||
break
|
||||
|
||||
try:
|
||||
parsed = json.loads(data)
|
||||
if 'chunk' in parsed:
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
elif parsed.get('event') == 'done':
|
||||
yield f"data: {json.dumps(parsed)}\n\n"
|
||||
print("Execution complete:", parsed.get('metadata'))
|
||||
except json.JSONDecodeError:
|
||||
pass
|
||||
|
||||
return Response(
|
||||
stream_with_context(generate()),
|
||||
mimetype='text/event-stream'
|
||||
)
|
||||
|
||||
if __name__ == '__main__':
|
||||
app.run(debug=True)
|
||||
```
|
||||
|
||||
### 环境配置
|
||||
|
||||
使用环境变量配置客户端:
|
||||
|
||||
<Tabs items={['开发', '生产']}>
|
||||
<Tab value="开发">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Development configuration
|
||||
client = SimStudioClient(
|
||||
    api_key=os.getenv("SIM_API_KEY"),
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
<Tab value="生产">
|
||||
|
||||
```python
|
||||
import os
|
||||
from simstudio import SimStudioClient
|
||||
|
||||
# Production configuration with error handling
|
||||
api_key = os.getenv("SIM_API_KEY")
|
||||
if not api_key:
|
||||
raise ValueError("SIM_API_KEY environment variable is required")
|
||||
|
||||
client = SimStudioClient(
|
||||
api_key=api_key,
|
||||
base_url=os.getenv("SIM_BASE_URL", "https://sim.ai")
|
||||
)
|
||||
```
|
||||
|
||||
</Tab>
|
||||
</Tabs>
|
||||
|
||||
## 获取您的 API 密钥
|
||||
|
||||
<Steps>
|
||||
<Step title="登录 Sim">
|
||||
前往 [Sim](https://sim.ai) 并登录您的账户。
|
||||
</Step>
|
||||
<Step title="打开您的工作流">
|
||||
前往您想要以编程方式执行的工作流。
|
||||
</Step>
|
||||
<Step title="部署您的工作流">
|
||||
如果尚未部署,请点击“部署”以部署您的工作流。
|
||||
</Step>
|
||||
<Step title="创建或选择一个 API 密钥">
|
||||
在部署过程中,选择或创建一个 API 密钥。
|
||||
</Step>
|
||||
<Step title="复制 API 密钥">
|
||||
复制 API 密钥以在您的 Python 应用程序中使用。
|
||||
</Step>
|
||||
</Steps>
|
||||
|
||||
## 系统要求
|
||||
|
||||
- Python 3.8+
|
||||
- requests >= 2.25.0
|
||||
|
||||
## 许可证
|
||||
|
||||
Apache-2.0
|
||||
1052
apps/docs/content/docs/zh/api-reference/typescript.mdx
Normal file
1052
apps/docs/content/docs/zh/api-reference/typescript.mdx
Normal file
File diff suppressed because it is too large
Load Diff
24
apps/docs/content/docs/zh/meta.json
Normal file
24
apps/docs/content/docs/zh/meta.json
Normal file
@@ -0,0 +1,24 @@
|
||||
{
|
||||
"title": "Sim Documentation",
|
||||
"pages": [
|
||||
"./introduction/index",
|
||||
"./getting-started/index",
|
||||
"./quick-reference/index",
|
||||
"triggers",
|
||||
"blocks",
|
||||
"tools",
|
||||
"connections",
|
||||
"mcp",
|
||||
"copilot",
|
||||
"skills",
|
||||
"knowledgebase",
|
||||
"variables",
|
||||
"credentials",
|
||||
"execution",
|
||||
"permissions",
|
||||
"self-hosting",
|
||||
"./enterprise/index",
|
||||
"./keyboard-shortcuts/index"
|
||||
],
|
||||
"defaultOpen": false
|
||||
}
|
||||
@@ -2,7 +2,8 @@ import type { InferPageType } from 'fumadocs-core/source'
|
||||
import type { PageData, source } from '@/lib/source'
|
||||
|
||||
export async function getLLMText(page: InferPageType<typeof source>) {
|
||||
const data = page.data as PageData
|
||||
const data = page.data as unknown as PageData
|
||||
if (typeof data.getText !== 'function') return ''
|
||||
const processed = await data.getText('processed')
|
||||
return `# ${data.title} (${page.url})
|
||||
|
||||
|
||||
132
apps/docs/lib/openapi.ts
Normal file
132
apps/docs/lib/openapi.ts
Normal file
@@ -0,0 +1,132 @@
|
||||
import { readFileSync } from 'node:fs'
|
||||
import { join } from 'node:path'
|
||||
import { createOpenAPI } from 'fumadocs-openapi/server'
|
||||
|
||||
export const openapi = createOpenAPI({
|
||||
input: ['./openapi.json'],
|
||||
})
|
||||
|
||||
interface OpenAPIOperation {
|
||||
path: string
|
||||
method: string
|
||||
}
|
||||
|
||||
function resolveRef(ref: string, spec: Record<string, unknown>): unknown {
|
||||
const parts = ref.replace('#/', '').split('/')
|
||||
let current: unknown = spec
|
||||
for (const part of parts) {
|
||||
if (current && typeof current === 'object') {
|
||||
current = (current as Record<string, unknown>)[part]
|
||||
} else {
|
||||
return undefined
|
||||
}
|
||||
}
|
||||
return current
|
||||
}
|
||||
|
||||
function resolveRefs(obj: unknown, spec: Record<string, unknown>, depth = 0): unknown {
|
||||
if (depth > 10) return obj
|
||||
if (Array.isArray(obj)) {
|
||||
return obj.map((item) => resolveRefs(item, spec, depth + 1))
|
||||
}
|
||||
if (obj && typeof obj === 'object') {
|
||||
const record = obj as Record<string, unknown>
|
||||
if ('$ref' in record && typeof record.$ref === 'string') {
|
||||
const resolved = resolveRef(record.$ref, spec)
|
||||
return resolveRefs(resolved, spec, depth + 1)
|
||||
}
|
||||
const result: Record<string, unknown> = {}
|
||||
for (const [key, value] of Object.entries(record)) {
|
||||
result[key] = resolveRefs(value, spec, depth + 1)
|
||||
}
|
||||
return result
|
||||
}
|
||||
return obj
|
||||
}
|
||||
|
||||
function formatSchema(schema: unknown): string {
|
||||
return JSON.stringify(schema, null, 2)
|
||||
}
|
||||
|
||||
let cachedSpec: Record<string, unknown> | null = null
|
||||
|
||||
function getSpec(): Record<string, unknown> {
|
||||
if (!cachedSpec) {
|
||||
const specPath = join(process.cwd(), 'openapi.json')
|
||||
cachedSpec = JSON.parse(readFileSync(specPath, 'utf8')) as Record<string, unknown>
|
||||
}
|
||||
return cachedSpec
|
||||
}
|
||||
|
||||
export function getApiSpecContent(
|
||||
title: string,
|
||||
description: string | undefined,
|
||||
operations: OpenAPIOperation[]
|
||||
): string {
|
||||
const spec = getSpec()
|
||||
|
||||
if (!operations || operations.length === 0) {
|
||||
return `# ${title}\n\n${description || ''}`
|
||||
}
|
||||
|
||||
const op = operations[0]
|
||||
const method = op.method.toUpperCase()
|
||||
const pathObj = (spec.paths as Record<string, Record<string, unknown>>)?.[op.path]
|
||||
const operation = pathObj?.[op.method.toLowerCase()] as Record<string, unknown> | undefined
|
||||
|
||||
if (!operation) {
|
||||
return `# ${title}\n\n${description || ''}`
|
||||
}
|
||||
|
||||
const resolved = resolveRefs(operation, spec) as Record<string, unknown>
|
||||
const lines: string[] = []
|
||||
|
||||
lines.push(`# ${title}`)
|
||||
lines.push(`\`${method} ${op.path}\``)
|
||||
|
||||
if (resolved.description) {
|
||||
lines.push(`## Description\n${resolved.description}`)
|
||||
}
|
||||
|
||||
const parameters = resolved.parameters as Array<Record<string, unknown>> | undefined
|
||||
if (parameters && parameters.length > 0) {
|
||||
lines.push('## Parameters')
|
||||
for (const param of parameters) {
|
||||
const required = param.required ? ' (required)' : ''
|
||||
const schemaType = param.schema
|
||||
? ` — \`${(param.schema as Record<string, unknown>).type || 'string'}\``
|
||||
: ''
|
||||
lines.push(
|
||||
`- **${param.name}** (${param.in})${required}${schemaType}: ${param.description || ''}`
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
const requestBody = resolved.requestBody as Record<string, unknown> | undefined
|
||||
if (requestBody) {
|
||||
lines.push('## Request Body')
|
||||
if (requestBody.description) {
|
||||
lines.push(String(requestBody.description))
|
||||
}
|
||||
const content = requestBody.content as Record<string, Record<string, unknown>> | undefined
|
||||
const jsonContent = content?.['application/json']
|
||||
if (jsonContent?.schema) {
|
||||
lines.push(`\`\`\`json\n${formatSchema(jsonContent.schema)}\n\`\`\``)
|
||||
}
|
||||
}
|
||||
|
||||
const responses = resolved.responses as Record<string, Record<string, unknown>> | undefined
|
||||
if (responses) {
|
||||
lines.push('## Responses')
|
||||
for (const [status, response] of Object.entries(responses)) {
|
||||
lines.push(`### ${status} — ${response.description || ''}`)
|
||||
const content = response.content as Record<string, Record<string, unknown>> | undefined
|
||||
const jsonContent = content?.['application/json']
|
||||
if (jsonContent?.schema) {
|
||||
lines.push(`\`\`\`json\n${formatSchema(jsonContent.schema)}\n\`\`\``)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
return lines.join('\n\n')
|
||||
}
|
||||
@@ -1,13 +1,93 @@
|
||||
import { type InferPageType, loader } from 'fumadocs-core/source'
|
||||
import { createElement, Fragment } from 'react'
|
||||
import { type InferPageType, loader, multiple } from 'fumadocs-core/source'
|
||||
import type { DocData, DocMethods } from 'fumadocs-mdx/runtime/types'
|
||||
import { openapiSource } from 'fumadocs-openapi/server'
|
||||
import { docs } from '@/.source/server'
|
||||
import { i18n } from './i18n'
|
||||
import { openapi } from './openapi'
|
||||
|
||||
export const source = loader({
|
||||
baseUrl: '/',
|
||||
source: docs.toFumadocsSource(),
|
||||
i18n,
|
||||
})
|
||||
const METHOD_COLORS: Record<string, string> = {
|
||||
GET: 'text-green-600 dark:text-green-400',
|
||||
HEAD: 'text-green-600 dark:text-green-400',
|
||||
OPTIONS: 'text-green-600 dark:text-green-400',
|
||||
POST: 'text-blue-600 dark:text-blue-400',
|
||||
PUT: 'text-yellow-600 dark:text-yellow-400',
|
||||
PATCH: 'text-orange-600 dark:text-orange-400',
|
||||
DELETE: 'text-red-600 dark:text-red-400',
|
||||
}
|
||||
|
||||
/**
|
||||
* Custom openapi plugin that places method badges BEFORE the page name
|
||||
* in the sidebar (like Mintlify/Gumloop) instead of after.
|
||||
*/
|
||||
function openapiPluginBadgeLeft() {
|
||||
return {
|
||||
name: 'fumadocs:openapi-badge-left',
|
||||
enforce: 'pre' as const,
|
||||
transformPageTree: {
|
||||
file(
|
||||
this: {
|
||||
storage: {
|
||||
read: (path: string) => { format: string; data: Record<string, unknown> } | undefined
|
||||
}
|
||||
},
|
||||
node: { name: React.ReactNode },
|
||||
filePath: string | undefined
|
||||
) {
|
||||
if (!filePath) return node
|
||||
const file = this.storage.read(filePath)
|
||||
if (!file || file.format !== 'page') return node
|
||||
const openApiData = file.data._openapi as { method?: string; webhook?: boolean } | undefined
|
||||
if (!openApiData || typeof openApiData !== 'object') return node
|
||||
if (openApiData.webhook) {
|
||||
node.name = createElement(
|
||||
Fragment,
|
||||
null,
|
||||
node.name,
|
||||
' ',
|
||||
createElement(
|
||||
'span',
|
||||
{
|
||||
className:
|
||||
'ms-auto border border-current px-1 rounded-lg text-xs text-nowrap font-mono',
|
||||
},
|
||||
'Webhook'
|
||||
)
|
||||
)
|
||||
} else if (openApiData.method) {
|
||||
const method = openApiData.method.toUpperCase()
|
||||
const colorClass = METHOD_COLORS[method] ?? METHOD_COLORS.GET
|
||||
node.name = createElement(
|
||||
Fragment,
|
||||
null,
|
||||
createElement(
|
||||
'span',
|
||||
{ className: `font-mono font-medium me-1.5 text-[10px] text-nowrap ${colorClass}` },
|
||||
method
|
||||
),
|
||||
node.name
|
||||
)
|
||||
}
|
||||
return node
|
||||
},
|
||||
},
|
||||
}
|
||||
}
|
||||
|
||||
export const source = loader(
|
||||
multiple({
|
||||
docs: docs.toFumadocsSource(),
|
||||
openapi: await openapiSource(openapi, {
|
||||
baseDir: 'en/api-reference/(generated)',
|
||||
groupBy: 'tag',
|
||||
}),
|
||||
}),
|
||||
{
|
||||
baseUrl: '/',
|
||||
i18n,
|
||||
plugins: [openapiPluginBadgeLeft() as never],
|
||||
}
|
||||
)
|
||||
|
||||
/** Full page data type including MDX content and metadata */
|
||||
export type PageData = DocData &
|
||||
|
||||
1893
apps/docs/openapi.json
Normal file
1893
apps/docs/openapi.json
Normal file
File diff suppressed because it is too large
Load Diff
@@ -17,15 +17,17 @@
|
||||
"class-variance-authority": "^0.7.1",
|
||||
"clsx": "^2.1.1",
|
||||
"drizzle-orm": "^0.44.5",
|
||||
"fumadocs-core": "16.2.3",
|
||||
"fumadocs-mdx": "14.1.0",
|
||||
"fumadocs-ui": "16.2.3",
|
||||
"fumadocs-core": "16.6.7",
|
||||
"fumadocs-mdx": "14.2.8",
|
||||
"fumadocs-openapi": "10.3.13",
|
||||
"fumadocs-ui": "16.6.7",
|
||||
"lucide-react": "^0.511.0",
|
||||
"next": "16.1.6",
|
||||
"next-themes": "^0.4.6",
|
||||
"postgres": "^3.4.5",
|
||||
"react": "19.2.1",
|
||||
"react-dom": "19.2.1",
|
||||
"shiki": "4.0.0",
|
||||
"tailwind-merge": "^3.0.2"
|
||||
},
|
||||
"devDependencies": {
|
||||
|
||||
@@ -108,7 +108,6 @@ export function ChatDeploy({
|
||||
onVersionActivated,
|
||||
}: ChatDeployProps) {
|
||||
const [imageUrl, setImageUrl] = useState<string | null>(null)
|
||||
const [isDeleting, setIsDeleting] = useState(false)
|
||||
const [internalShowDeleteConfirmation, setInternalShowDeleteConfirmation] = useState(false)
|
||||
|
||||
const showDeleteConfirmation =
|
||||
@@ -122,6 +121,7 @@ export function ChatDeploy({
|
||||
const [formData, setFormData] = useState<ChatFormData>(initialFormData)
|
||||
const [errors, setErrors] = useState<FormErrors>({})
|
||||
const formRef = useRef<HTMLFormElement>(null)
|
||||
const [formInitCounter, setFormInitCounter] = useState(0)
|
||||
|
||||
const createChatMutation = useCreateChat()
|
||||
const updateChatMutation = useUpdateChat()
|
||||
@@ -222,13 +222,20 @@ export function ChatDeploy({
|
||||
|
||||
setChatSubmitting(true)
|
||||
|
||||
const isNewChat = !existingChat?.id
|
||||
|
||||
// Open window before async operation to avoid popup blockers
|
||||
const newTab = isNewChat ? window.open('', '_blank') : null
|
||||
|
||||
try {
|
||||
if (!validateForm(!!existingChat)) {
|
||||
newTab?.close()
|
||||
setChatSubmitting(false)
|
||||
return
|
||||
}
|
||||
|
||||
if (!isIdentifierValid && formData.identifier !== existingChat?.identifier) {
|
||||
newTab?.close()
|
||||
setError('identifier', 'Please wait for identifier validation to complete')
|
||||
setChatSubmitting(false)
|
||||
return
|
||||
@@ -257,13 +264,18 @@ export function ChatDeploy({
|
||||
onDeployed?.()
|
||||
onVersionActivated?.()
|
||||
|
||||
if (chatUrl) {
|
||||
window.open(chatUrl, '_blank', 'noopener,noreferrer')
|
||||
if (newTab && chatUrl) {
|
||||
newTab.opener = null
|
||||
newTab.location.href = chatUrl
|
||||
} else if (newTab) {
|
||||
newTab.close()
|
||||
}
|
||||
|
||||
setHasInitializedForm(false)
|
||||
await onRefetchChat()
|
||||
setHasInitializedForm(false)
|
||||
setFormInitCounter((c) => c + 1)
|
||||
} catch (error: any) {
|
||||
newTab?.close()
|
||||
if (error.message?.includes('identifier')) {
|
||||
setError('identifier', error.message)
|
||||
} else {
|
||||
@@ -278,8 +290,6 @@ export function ChatDeploy({
|
||||
if (!existingChat || !existingChat.id) return
|
||||
|
||||
try {
|
||||
setIsDeleting(true)
|
||||
|
||||
await deleteChatMutation.mutateAsync({
|
||||
chatId: existingChat.id,
|
||||
workflowId,
|
||||
@@ -287,6 +297,7 @@ export function ChatDeploy({
|
||||
|
||||
setImageUrl(null)
|
||||
setHasInitializedForm(false)
|
||||
setFormInitCounter((c) => c + 1)
|
||||
await onRefetchChat()
|
||||
|
||||
onDeploymentComplete?.()
|
||||
@@ -294,7 +305,6 @@ export function ChatDeploy({
|
||||
logger.error('Failed to delete chat:', error)
|
||||
setError('general', error.message || 'An unexpected error occurred while deleting')
|
||||
} finally {
|
||||
setIsDeleting(false)
|
||||
setShowDeleteConfirmation(false)
|
||||
}
|
||||
}
|
||||
@@ -363,7 +373,7 @@ export function ChatDeploy({
|
||||
</div>
|
||||
|
||||
<AuthSelector
|
||||
key={existingChat?.id ?? 'new'}
|
||||
key={`${existingChat?.id ?? 'new'}-${formInitCounter}`}
|
||||
authType={formData.authType}
|
||||
password={formData.password}
|
||||
emails={formData.emails}
|
||||
@@ -424,12 +434,16 @@ export function ChatDeploy({
|
||||
<Button
|
||||
variant='default'
|
||||
onClick={() => setShowDeleteConfirmation(false)}
|
||||
disabled={isDeleting}
|
||||
disabled={deleteChatMutation.isPending}
|
||||
>
|
||||
Cancel
|
||||
</Button>
|
||||
<Button variant='destructive' onClick={handleDelete} disabled={isDeleting}>
|
||||
{isDeleting ? 'Deleting...' : 'Delete'}
|
||||
<Button
|
||||
variant='destructive'
|
||||
onClick={handleDelete}
|
||||
disabled={deleteChatMutation.isPending}
|
||||
>
|
||||
{deleteChatMutation.isPending ? 'Deleting...' : 'Delete'}
|
||||
</Button>
|
||||
</ModalFooter>
|
||||
</ModalContent>
|
||||
@@ -620,6 +634,12 @@ function AuthSelector({
|
||||
emails.map((email) => ({ value: email, isValid: true }))
|
||||
)
|
||||
|
||||
useEffect(() => {
|
||||
if (!copySuccess) return
|
||||
const timer = setTimeout(() => setCopySuccess(false), 2000)
|
||||
return () => clearTimeout(timer)
|
||||
}, [copySuccess])
|
||||
|
||||
const handleGeneratePassword = () => {
|
||||
const newPassword = generatePassword(24)
|
||||
onPasswordChange(newPassword)
|
||||
@@ -628,7 +648,6 @@ function AuthSelector({
|
||||
const copyToClipboard = (text: string) => {
|
||||
navigator.clipboard.writeText(text)
|
||||
setCopySuccess(true)
|
||||
setTimeout(() => setCopySuccess(false), 2000)
|
||||
}
|
||||
|
||||
const addEmail = (email: string): boolean => {
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
'use client'
|
||||
|
||||
import { useCallback, useEffect, useMemo, useState } from 'react'
|
||||
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { useQueryClient } from '@tanstack/react-query'
|
||||
import {
|
||||
@@ -113,6 +113,7 @@ export function DeployModal({
|
||||
const [showA2aDeleteConfirm, setShowA2aDeleteConfirm] = useState(false)
|
||||
|
||||
const [chatSuccess, setChatSuccess] = useState(false)
|
||||
const chatSuccessTimeoutRef = useRef<ReturnType<typeof setTimeout> | null>(null)
|
||||
|
||||
const [isCreateKeyModalOpen, setIsCreateKeyModalOpen] = useState(false)
|
||||
const [isApiInfoModalOpen, setIsApiInfoModalOpen] = useState(false)
|
||||
@@ -232,6 +233,12 @@ export function DeployModal({
|
||||
setActiveTab('general')
|
||||
setDeployError(null)
|
||||
setDeployWarnings([])
|
||||
setChatSuccess(false)
|
||||
}
|
||||
return () => {
|
||||
if (chatSuccessTimeoutRef.current) {
|
||||
clearTimeout(chatSuccessTimeoutRef.current)
|
||||
}
|
||||
}
|
||||
}, [open, workflowId])
|
||||
|
||||
@@ -377,15 +384,16 @@ export function DeployModal({
|
||||
const handleChatDeployed = useCallback(async () => {
|
||||
if (!workflowId) return
|
||||
|
||||
queryClient.invalidateQueries({ queryKey: deploymentKeys.info(workflowId) })
|
||||
queryClient.invalidateQueries({ queryKey: deploymentKeys.versions(workflowId) })
|
||||
queryClient.invalidateQueries({ queryKey: deploymentKeys.chatStatus(workflowId) })
|
||||
|
||||
await refetchDeployedState()
|
||||
useWorkflowRegistry.getState().setWorkflowNeedsRedeployment(workflowId, false)
|
||||
|
||||
if (chatSuccessTimeoutRef.current) {
|
||||
clearTimeout(chatSuccessTimeoutRef.current)
|
||||
}
|
||||
setChatSuccess(true)
|
||||
setTimeout(() => setChatSuccess(false), 2000)
|
||||
chatSuccessTimeoutRef.current = setTimeout(() => setChatSuccess(false), 2000)
|
||||
}, [workflowId, queryClient, refetchDeployedState])
|
||||
|
||||
const handleRefetchChat = useCallback(async () => {
|
||||
@@ -394,14 +402,7 @@ export function DeployModal({
|
||||
|
||||
const handleChatFormSubmit = useCallback(() => {
|
||||
const form = document.getElementById('chat-deploy-form') as HTMLFormElement
|
||||
if (form) {
|
||||
const updateTrigger = form.querySelector('[data-update-trigger]') as HTMLButtonElement
|
||||
if (updateTrigger) {
|
||||
updateTrigger.click()
|
||||
} else {
|
||||
form.requestSubmit()
|
||||
}
|
||||
}
|
||||
form?.requestSubmit()
|
||||
}, [])
|
||||
|
||||
const handleChatDelete = useCallback(() => {
|
||||
|
||||
745
apps/sim/blocks/blocks/amplitude.ts
Normal file
745
apps/sim/blocks/blocks/amplitude.ts
Normal file
@@ -0,0 +1,745 @@
|
||||
import { AmplitudeIcon } from '@/components/icons'
|
||||
import { AuthMode, type BlockConfig } from '@/blocks/types'
|
||||
|
||||
export const AmplitudeBlock: BlockConfig = {
|
||||
type: 'amplitude',
|
||||
name: 'Amplitude',
|
||||
description: 'Track events and query analytics from Amplitude',
|
||||
longDescription:
|
||||
'Integrate Amplitude into your workflow to track events, identify users and groups, search for users, query analytics, and retrieve revenue data.',
|
||||
docsLink: 'https://docs.sim.ai/tools/amplitude',
|
||||
category: 'tools',
|
||||
bgColor: '#1B1F3B',
|
||||
icon: AmplitudeIcon,
|
||||
authMode: AuthMode.ApiKey,
|
||||
|
||||
subBlocks: [
|
||||
{
|
||||
id: 'operation',
|
||||
title: 'Operation',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Send Event', id: 'send_event' },
|
||||
{ label: 'Identify User', id: 'identify_user' },
|
||||
{ label: 'Group Identify', id: 'group_identify' },
|
||||
{ label: 'User Search', id: 'user_search' },
|
||||
{ label: 'User Activity', id: 'user_activity' },
|
||||
{ label: 'User Profile', id: 'user_profile' },
|
||||
{ label: 'Event Segmentation', id: 'event_segmentation' },
|
||||
{ label: 'Get Active Users', id: 'get_active_users' },
|
||||
{ label: 'Real-time Active Users', id: 'realtime_active_users' },
|
||||
{ label: 'List Events', id: 'list_events' },
|
||||
{ label: 'Get Revenue', id: 'get_revenue' },
|
||||
],
|
||||
value: () => 'send_event',
|
||||
},
|
||||
|
||||
// API Key (required for all operations)
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
type: 'short-input',
|
||||
required: true,
|
||||
placeholder: 'Enter your Amplitude API Key',
|
||||
password: true,
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: 'user_profile',
|
||||
not: true,
|
||||
},
|
||||
},
|
||||
|
||||
// API Key for user_profile (not required - uses only secretKey)
|
||||
// User Profile uses Api-Key header with secret key only
|
||||
|
||||
// Secret Key (required for Dashboard REST API operations + User Profile)
|
||||
{
|
||||
id: 'secretKey',
|
||||
title: 'Secret Key',
|
||||
type: 'short-input',
|
||||
required: {
|
||||
field: 'operation',
|
||||
value: [
|
||||
'user_search',
|
||||
'user_activity',
|
||||
'user_profile',
|
||||
'event_segmentation',
|
||||
'get_active_users',
|
||||
'realtime_active_users',
|
||||
'list_events',
|
||||
'get_revenue',
|
||||
],
|
||||
},
|
||||
placeholder: 'Enter your Amplitude Secret Key',
|
||||
password: true,
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: [
|
||||
'user_search',
|
||||
'user_activity',
|
||||
'user_profile',
|
||||
'event_segmentation',
|
||||
'get_active_users',
|
||||
'realtime_active_users',
|
||||
'list_events',
|
||||
'get_revenue',
|
||||
],
|
||||
},
|
||||
},
|
||||
|
||||
// --- Send Event fields ---
|
||||
{
|
||||
id: 'eventType',
|
||||
title: 'Event Type',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'send_event' },
|
||||
placeholder: 'e.g., page_view, purchase, signup',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
},
|
||||
{
|
||||
id: 'userId',
|
||||
title: 'User ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'User identifier',
|
||||
condition: { field: 'operation', value: ['send_event', 'identify_user'] },
|
||||
},
|
||||
{
|
||||
id: 'profileUserId',
|
||||
title: 'User ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'External user ID (required if no Device ID)',
|
||||
condition: { field: 'operation', value: 'user_profile' },
|
||||
},
|
||||
{
|
||||
id: 'deviceId',
|
||||
title: 'Device ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Device identifier',
|
||||
condition: { field: 'operation', value: ['send_event', 'identify_user'] },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'profileDeviceId',
|
||||
title: 'Device ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Device ID (required if no User ID)',
|
||||
condition: { field: 'operation', value: 'user_profile' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'eventProperties',
|
||||
title: 'Event Properties',
|
||||
type: 'long-input',
|
||||
placeholder: '{"button": "signup", "page": "/home"}',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a JSON object of event properties for an Amplitude event. Return ONLY the JSON object - no explanations, no extra text.',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'sendEventUserProperties',
|
||||
title: 'User Properties',
|
||||
type: 'long-input',
|
||||
placeholder: '{"$set": {"plan": "premium"}}',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a JSON object of user properties for Amplitude. Use $set, $setOnce, $add, $append, or $unset operations. Return ONLY the JSON object - no explanations, no extra text.',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'platform',
|
||||
title: 'Platform',
|
||||
type: 'short-input',
|
||||
placeholder: 'e.g., Web, iOS, Android',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'appVersion',
|
||||
title: 'App Version',
|
||||
type: 'short-input',
|
||||
placeholder: 'e.g., 1.0.0',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'insertId',
|
||||
title: 'Insert ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Unique ID for deduplication',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'price',
|
||||
title: 'Price',
|
||||
type: 'short-input',
|
||||
placeholder: '9.99',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'quantity',
|
||||
title: 'Quantity',
|
||||
type: 'short-input',
|
||||
placeholder: '1',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'revenue',
|
||||
title: 'Revenue',
|
||||
type: 'short-input',
|
||||
placeholder: '9.99',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'productId',
|
||||
title: 'Product ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Product identifier',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'revenueType',
|
||||
title: 'Revenue Type',
|
||||
type: 'short-input',
|
||||
placeholder: 'e.g., purchase, refund',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'country',
|
||||
title: 'Country',
|
||||
type: 'short-input',
|
||||
placeholder: 'Two-letter country code (e.g., US)',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'language',
|
||||
title: 'Language',
|
||||
type: 'short-input',
|
||||
placeholder: 'Language code (e.g., en)',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'ip',
|
||||
title: 'IP Address',
|
||||
type: 'short-input',
|
||||
placeholder: 'IP for geo-location (use "$remote" for request IP)',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'time',
|
||||
title: 'Timestamp',
|
||||
type: 'short-input',
|
||||
placeholder: 'Milliseconds since epoch',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a timestamp in milliseconds since epoch for the current time. Return ONLY the number - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'sessionId',
|
||||
title: 'Session ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Session start time in milliseconds (-1 for no session)',
|
||||
condition: { field: 'operation', value: 'send_event' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Identify User fields ---
|
||||
{
|
||||
id: 'identifyUserProperties',
|
||||
title: 'User Properties',
|
||||
type: 'long-input',
|
||||
required: { field: 'operation', value: 'identify_user' },
|
||||
placeholder: '{"$set": {"plan": "premium", "company": "Acme"}}',
|
||||
condition: { field: 'operation', value: 'identify_user' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a JSON object of user properties for Amplitude Identify API. Use $set, $setOnce, $add, $append, or $unset operations. Return ONLY the JSON object - no explanations, no extra text.',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
|
||||
// --- Group Identify fields ---
|
||||
{
|
||||
id: 'groupType',
|
||||
title: 'Group Type',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'group_identify' },
|
||||
placeholder: 'e.g., company, org_id',
|
||||
condition: { field: 'operation', value: 'group_identify' },
|
||||
},
|
||||
{
|
||||
id: 'groupValue',
|
||||
title: 'Group Value',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'group_identify' },
|
||||
placeholder: 'e.g., Acme Corp',
|
||||
condition: { field: 'operation', value: 'group_identify' },
|
||||
},
|
||||
{
|
||||
id: 'groupProperties',
|
||||
title: 'Group Properties',
|
||||
type: 'long-input',
|
||||
required: { field: 'operation', value: 'group_identify' },
|
||||
placeholder: '{"$set": {"industry": "tech", "employee_count": 500}}',
|
||||
condition: { field: 'operation', value: 'group_identify' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a JSON object of group properties for Amplitude Group Identify API. Use $set, $setOnce, $add, $append, or $unset operations. Return ONLY the JSON object - no explanations, no extra text.',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
|
||||
// --- User Search fields ---
|
||||
{
|
||||
id: 'searchUser',
|
||||
title: 'User',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'user_search' },
|
||||
placeholder: 'User ID, Device ID, or Amplitude ID',
|
||||
condition: { field: 'operation', value: 'user_search' },
|
||||
},
|
||||
|
||||
// --- User Activity fields ---
|
||||
{
|
||||
id: 'amplitudeId',
|
||||
title: 'Amplitude ID',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'user_activity' },
|
||||
placeholder: 'Amplitude internal user ID',
|
||||
condition: { field: 'operation', value: 'user_activity' },
|
||||
},
|
||||
{
|
||||
id: 'activityOffset',
|
||||
title: 'Offset',
|
||||
type: 'short-input',
|
||||
placeholder: '0',
|
||||
condition: { field: 'operation', value: 'user_activity' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'activityLimit',
|
||||
title: 'Limit',
|
||||
type: 'short-input',
|
||||
placeholder: '1000',
|
||||
condition: { field: 'operation', value: 'user_activity' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'activityDirection',
|
||||
title: 'Direction',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Latest First', id: 'latest' },
|
||||
{ label: 'Earliest First', id: 'earliest' },
|
||||
],
|
||||
value: () => 'latest',
|
||||
condition: { field: 'operation', value: 'user_activity' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- User Profile fields ---
|
||||
{
|
||||
id: 'getAmpProps',
|
||||
title: 'Include User Properties',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'No', id: 'false' },
|
||||
{ label: 'Yes', id: 'true' },
|
||||
],
|
||||
value: () => 'false',
|
||||
condition: { field: 'operation', value: 'user_profile' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'getCohortIds',
|
||||
title: 'Include Cohort IDs',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'No', id: 'false' },
|
||||
{ label: 'Yes', id: 'true' },
|
||||
],
|
||||
value: () => 'false',
|
||||
condition: { field: 'operation', value: 'user_profile' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'getComputations',
|
||||
title: 'Include Computed Properties',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'No', id: 'false' },
|
||||
{ label: 'Yes', id: 'true' },
|
||||
],
|
||||
value: () => 'false',
|
||||
condition: { field: 'operation', value: 'user_profile' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Event Segmentation fields ---
|
||||
{
|
||||
id: 'segmentationEventType',
|
||||
title: 'Event Type',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'event_segmentation' },
|
||||
placeholder: 'Event type to analyze',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
},
|
||||
{
|
||||
id: 'segmentationStart',
|
||||
title: 'Start Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'event_segmentation' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'segmentationEnd',
|
||||
title: 'End Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'event_segmentation' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'segmentationMetric',
|
||||
title: 'Metric',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Uniques', id: 'uniques' },
|
||||
{ label: 'Totals', id: 'totals' },
|
||||
{ label: '% DAU', id: 'pct_dau' },
|
||||
{ label: 'Average', id: 'average' },
|
||||
{ label: 'Histogram', id: 'histogram' },
|
||||
{ label: 'Sums', id: 'sums' },
|
||||
{ label: 'Value Average', id: 'value_avg' },
|
||||
{ label: 'Formula', id: 'formula' },
|
||||
],
|
||||
value: () => 'uniques',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'segmentationInterval',
|
||||
title: 'Interval',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Daily', id: '1' },
|
||||
{ label: 'Weekly', id: '7' },
|
||||
{ label: 'Monthly', id: '30' },
|
||||
],
|
||||
value: () => '1',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'segmentationGroupBy',
|
||||
title: 'Group By',
|
||||
type: 'short-input',
|
||||
placeholder: 'Property name (prefix custom with "gp:")',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'segmentationLimit',
|
||||
title: 'Limit',
|
||||
type: 'short-input',
|
||||
placeholder: 'Max group-by values (max 1000)',
|
||||
condition: { field: 'operation', value: 'event_segmentation' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Get Active Users fields ---
|
||||
{
|
||||
id: 'activeUsersStart',
|
||||
title: 'Start Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'get_active_users' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'get_active_users' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'activeUsersEnd',
|
||||
title: 'End Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'get_active_users' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'get_active_users' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'activeUsersMetric',
|
||||
title: 'Metric',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Active Users', id: 'active' },
|
||||
{ label: 'New Users', id: 'new' },
|
||||
],
|
||||
value: () => 'active',
|
||||
condition: { field: 'operation', value: 'get_active_users' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'activeUsersInterval',
|
||||
title: 'Interval',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Daily', id: '1' },
|
||||
{ label: 'Weekly', id: '7' },
|
||||
{ label: 'Monthly', id: '30' },
|
||||
],
|
||||
value: () => '1',
|
||||
condition: { field: 'operation', value: 'get_active_users' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Get Revenue fields ---
|
||||
{
|
||||
id: 'revenueStart',
|
||||
title: 'Start Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'get_revenue' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'get_revenue' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'revenueEnd',
|
||||
title: 'End Date',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'get_revenue' },
|
||||
placeholder: 'YYYYMMDD',
|
||||
condition: { field: 'operation', value: 'get_revenue' },
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a date in YYYYMMDD format. Return ONLY the date string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'revenueMetric',
|
||||
title: 'Metric',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'ARPU', id: '0' },
|
||||
{ label: 'ARPPU', id: '1' },
|
||||
{ label: 'Total Revenue', id: '2' },
|
||||
{ label: 'Paying Users', id: '3' },
|
||||
],
|
||||
value: () => '2',
|
||||
condition: { field: 'operation', value: 'get_revenue' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'revenueInterval',
|
||||
title: 'Interval',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Daily', id: '1' },
|
||||
{ label: 'Weekly', id: '7' },
|
||||
{ label: 'Monthly', id: '30' },
|
||||
],
|
||||
value: () => '1',
|
||||
condition: { field: 'operation', value: 'get_revenue' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
],
|
||||
|
||||
tools: {
|
||||
access: [
|
||||
'amplitude_send_event',
|
||||
'amplitude_identify_user',
|
||||
'amplitude_group_identify',
|
||||
'amplitude_user_search',
|
||||
'amplitude_user_activity',
|
||||
'amplitude_user_profile',
|
||||
'amplitude_event_segmentation',
|
||||
'amplitude_get_active_users',
|
||||
'amplitude_realtime_active_users',
|
||||
'amplitude_list_events',
|
||||
'amplitude_get_revenue',
|
||||
],
|
||||
config: {
|
||||
tool: (params) => `amplitude_${params.operation}`,
|
||||
params: (params) => {
|
||||
const result: Record<string, unknown> = {}
|
||||
|
||||
switch (params.operation) {
|
||||
case 'send_event':
|
||||
if (params.sendEventUserProperties)
|
||||
result.userProperties = params.sendEventUserProperties
|
||||
break
|
||||
|
||||
case 'identify_user':
|
||||
if (params.identifyUserProperties) result.userProperties = params.identifyUserProperties
|
||||
break
|
||||
|
||||
case 'user_search':
|
||||
if (params.searchUser) result.user = params.searchUser
|
||||
break
|
||||
|
||||
case 'user_activity':
|
||||
if (params.activityOffset) result.offset = params.activityOffset
|
||||
if (params.activityLimit) result.limit = params.activityLimit
|
||||
if (params.activityDirection) result.direction = params.activityDirection
|
||||
break
|
||||
|
||||
case 'user_profile':
|
||||
if (params.profileUserId) result.userId = params.profileUserId
|
||||
if (params.profileDeviceId) result.deviceId = params.profileDeviceId
|
||||
break
|
||||
|
||||
case 'event_segmentation':
|
||||
if (params.segmentationEventType) result.eventType = params.segmentationEventType
|
||||
if (params.segmentationStart) result.start = params.segmentationStart
|
||||
if (params.segmentationEnd) result.end = params.segmentationEnd
|
||||
if (params.segmentationMetric) result.metric = params.segmentationMetric
|
||||
if (params.segmentationInterval) result.interval = params.segmentationInterval
|
||||
if (params.segmentationGroupBy) result.groupBy = params.segmentationGroupBy
|
||||
if (params.segmentationLimit) result.limit = params.segmentationLimit
|
||||
break
|
||||
|
||||
case 'get_active_users':
|
||||
if (params.activeUsersStart) result.start = params.activeUsersStart
|
||||
if (params.activeUsersEnd) result.end = params.activeUsersEnd
|
||||
if (params.activeUsersMetric) result.metric = params.activeUsersMetric
|
||||
if (params.activeUsersInterval) result.interval = params.activeUsersInterval
|
||||
break
|
||||
|
||||
case 'get_revenue':
|
||||
if (params.revenueStart) result.start = params.revenueStart
|
||||
if (params.revenueEnd) result.end = params.revenueEnd
|
||||
if (params.revenueMetric) result.metric = params.revenueMetric
|
||||
if (params.revenueInterval) result.interval = params.revenueInterval
|
||||
break
|
||||
}
|
||||
|
||||
return result
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
inputs: {
|
||||
operation: { type: 'string', description: 'Operation to perform' },
|
||||
apiKey: { type: 'string', description: 'Amplitude API Key' },
|
||||
secretKey: { type: 'string', description: 'Amplitude Secret Key' },
|
||||
eventType: { type: 'string', description: 'Event type name' },
|
||||
userId: { type: 'string', description: 'User ID' },
|
||||
deviceId: { type: 'string', description: 'Device ID' },
|
||||
eventProperties: { type: 'string', description: 'Event properties JSON' },
|
||||
sendEventUserProperties: { type: 'string', description: 'User properties for send event' },
|
||||
identifyUserProperties: { type: 'string', description: 'User properties for identify' },
|
||||
groupType: { type: 'string', description: 'Group type classification' },
|
||||
groupValue: { type: 'string', description: 'Group identifier value' },
|
||||
groupProperties: { type: 'string', description: 'Group properties JSON' },
|
||||
searchUser: { type: 'string', description: 'User to search for' },
|
||||
amplitudeId: { type: 'string', description: 'Amplitude internal user ID' },
|
||||
profileUserId: { type: 'string', description: 'User ID for profile lookup' },
|
||||
profileDeviceId: { type: 'string', description: 'Device ID for profile lookup' },
|
||||
segmentationEventType: { type: 'string', description: 'Event type to analyze' },
|
||||
segmentationStart: { type: 'string', description: 'Segmentation start date' },
|
||||
segmentationEnd: { type: 'string', description: 'Segmentation end date' },
|
||||
activeUsersStart: { type: 'string', description: 'Active users start date' },
|
||||
activeUsersEnd: { type: 'string', description: 'Active users end date' },
|
||||
revenueStart: { type: 'string', description: 'Revenue start date' },
|
||||
revenueEnd: { type: 'string', description: 'Revenue end date' },
|
||||
},
|
||||
|
||||
outputs: {
|
||||
code: {
|
||||
type: 'number',
|
||||
description: 'Response status code',
|
||||
},
|
||||
message: {
|
||||
type: 'string',
|
||||
description: 'Response message (identify_user, group_identify)',
|
||||
},
|
||||
eventsIngested: {
|
||||
type: 'number',
|
||||
description: 'Number of events ingested (send_event)',
|
||||
},
|
||||
matches: {
|
||||
type: 'json',
|
||||
description: 'User search matches (amplitudeId, userId)',
|
||||
},
|
||||
events: {
|
||||
type: 'json',
|
||||
description: 'Event list (list_events, user_activity)',
|
||||
},
|
||||
userData: {
|
||||
type: 'json',
|
||||
description: 'User metadata (user_activity)',
|
||||
},
|
||||
series: {
|
||||
type: 'json',
|
||||
description: 'Time-series data (segmentation, active_users, revenue, realtime)',
|
||||
},
|
||||
seriesLabels: {
|
||||
type: 'json',
|
||||
description: 'Labels for each data series (segmentation, realtime, revenue)',
|
||||
},
|
||||
seriesMeta: {
|
||||
type: 'json',
|
||||
description: 'Metadata labels for data series (active_users)',
|
||||
},
|
||||
seriesCollapsed: {
|
||||
type: 'json',
|
||||
description: 'Collapsed aggregate totals per series (segmentation)',
|
||||
},
|
||||
xValues: {
|
||||
type: 'json',
|
||||
description: 'X-axis date/time values for chart data',
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -12,7 +12,7 @@ export const DatabricksBlock: BlockConfig<DatabricksResponse> = {
|
||||
'Connect to Databricks to execute SQL queries against SQL warehouses, trigger and monitor job runs, manage clusters, and retrieve run outputs. Requires a Personal Access Token and workspace host URL.',
|
||||
docsLink: 'https://docs.sim.ai/tools/databricks',
|
||||
category: 'tools',
|
||||
bgColor: '#FF3621',
|
||||
bgColor: '#F9F7F4',
|
||||
icon: DatabricksIcon,
|
||||
subBlocks: [
|
||||
{
|
||||
|
||||
86
apps/sim/blocks/blocks/google_pagespeed.ts
Normal file
86
apps/sim/blocks/blocks/google_pagespeed.ts
Normal file
@@ -0,0 +1,86 @@
|
||||
import { GooglePagespeedIcon } from '@/components/icons'
|
||||
import { AuthMode, type BlockConfig } from '@/blocks/types'
|
||||
import type { GooglePagespeedAnalyzeResponse } from '@/tools/google_pagespeed/types'
|
||||
|
||||
export const GooglePagespeedBlock: BlockConfig<GooglePagespeedAnalyzeResponse> = {
|
||||
type: 'google_pagespeed',
|
||||
name: 'Google PageSpeed',
|
||||
description: 'Analyze webpage performance with Google PageSpeed Insights',
|
||||
longDescription:
|
||||
'Analyze web pages for performance, accessibility, SEO, and best practices using Google PageSpeed Insights API powered by Lighthouse.',
|
||||
docsLink: 'https://docs.sim.ai/tools/google_pagespeed',
|
||||
category: 'tools',
|
||||
bgColor: '#E0E0E0',
|
||||
icon: GooglePagespeedIcon,
|
||||
authMode: AuthMode.ApiKey,
|
||||
|
||||
subBlocks: [
|
||||
{
|
||||
id: 'url',
|
||||
title: 'URL',
|
||||
type: 'short-input',
|
||||
required: true,
|
||||
placeholder: 'https://example.com',
|
||||
},
|
||||
{
|
||||
id: 'strategy',
|
||||
title: 'Strategy',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Desktop', id: 'desktop' },
|
||||
{ label: 'Mobile', id: 'mobile' },
|
||||
],
|
||||
value: () => 'desktop',
|
||||
},
|
||||
{
|
||||
id: 'category',
|
||||
title: 'Categories',
|
||||
type: 'short-input',
|
||||
placeholder: 'performance, accessibility, best-practices, seo',
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate a comma-separated list of Google PageSpeed Insights categories to analyze. Valid values are: performance, accessibility, best-practices, seo. Return ONLY the comma-separated list - no explanations, no extra text.',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'locale',
|
||||
title: 'Locale',
|
||||
type: 'short-input',
|
||||
placeholder: 'en',
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
type: 'short-input',
|
||||
required: true,
|
||||
placeholder: 'Enter your Google PageSpeed API key',
|
||||
password: true,
|
||||
},
|
||||
],
|
||||
|
||||
tools: {
|
||||
access: ['google_pagespeed_analyze'],
|
||||
config: {
|
||||
tool: () => 'google_pagespeed_analyze',
|
||||
},
|
||||
},
|
||||
|
||||
inputs: {
|
||||
url: { type: 'string', description: 'URL to analyze' },
|
||||
strategy: { type: 'string', description: 'Analysis strategy (desktop or mobile)' },
|
||||
category: { type: 'string', description: 'Comma-separated categories to analyze' },
|
||||
locale: { type: 'string', description: 'Locale for results' },
|
||||
apiKey: { type: 'string', description: 'Google PageSpeed API key' },
|
||||
},
|
||||
|
||||
outputs: {
|
||||
response: {
|
||||
type: 'json',
|
||||
description:
|
||||
'PageSpeed analysis results including category scores (performanceScore, accessibilityScore, bestPracticesScore, seoScore), Core Web Vitals display values and numeric values (firstContentfulPaint, largestContentfulPaint, totalBlockingTime, cumulativeLayoutShift, speedIndex, interactive), and metadata (finalUrl, overallCategory, analysisTimestamp, lighthouseVersion)',
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -154,7 +154,9 @@ Example:
|
||||
{
|
||||
"clxf1nxlb000t0ml79ajwcsj0": true,
|
||||
"clxf2q43u00010mlh12q9ggx1": false
|
||||
}`,
|
||||
}
|
||||
|
||||
Return ONLY the JSON object - no explanations, no extra text.`,
|
||||
placeholder: 'Describe the mailing list subscriptions...',
|
||||
},
|
||||
},
|
||||
@@ -183,7 +185,9 @@ Example:
|
||||
"signupDate": "2024-01-15T00:00:00Z",
|
||||
"isActive": true,
|
||||
"seats": 5
|
||||
}`,
|
||||
}
|
||||
|
||||
Return ONLY the JSON object - no explanations, no extra text.`,
|
||||
placeholder: 'Describe the custom properties...',
|
||||
},
|
||||
},
|
||||
@@ -221,7 +225,9 @@ Example:
|
||||
"name": "John Smith",
|
||||
"confirmationUrl": "https://example.com/confirm?token=abc123",
|
||||
"expiresIn": 24
|
||||
}`,
|
||||
}
|
||||
|
||||
Return ONLY the JSON object - no explanations, no extra text.`,
|
||||
placeholder: 'Describe the template variables...',
|
||||
},
|
||||
},
|
||||
@@ -261,7 +267,9 @@ Example:
|
||||
"contentType": "application/pdf",
|
||||
"data": "JVBERi0xLjQK..."
|
||||
}
|
||||
]`,
|
||||
]
|
||||
|
||||
Return ONLY the JSON array - no explanations, no extra text.`,
|
||||
placeholder: 'Describe the attachments...',
|
||||
},
|
||||
},
|
||||
@@ -300,7 +308,9 @@ Example:
|
||||
"amount": 49.99,
|
||||
"currency": "USD",
|
||||
"isUpgrade": true
|
||||
}`,
|
||||
}
|
||||
|
||||
Return ONLY the JSON object - no explanations, no extra text.`,
|
||||
placeholder: 'Describe the event properties...',
|
||||
},
|
||||
},
|
||||
@@ -349,6 +359,7 @@ Example:
|
||||
{ label: 'Boolean', id: 'boolean' },
|
||||
{ label: 'Date', id: 'date' },
|
||||
],
|
||||
value: () => 'string',
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: 'create_contact_property',
|
||||
@@ -363,6 +374,7 @@ Example:
|
||||
{ label: 'All Properties', id: 'all' },
|
||||
{ label: 'Custom Only', id: 'custom' },
|
||||
],
|
||||
value: () => 'all',
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: 'list_contact_properties',
|
||||
@@ -497,23 +509,28 @@ Example:
|
||||
outputs: {
|
||||
success: { type: 'boolean', description: 'Whether the operation succeeded' },
|
||||
id: { type: 'string', description: 'Contact ID (create/update operations)' },
|
||||
contacts: { type: 'json', description: 'Array of matching contacts (find operation)' },
|
||||
contacts: {
|
||||
type: 'json',
|
||||
description:
|
||||
'Array of matching contacts (id, email, firstName, lastName, source, subscribed, userGroup, userId, mailingLists, optInStatus)',
|
||||
},
|
||||
message: { type: 'string', description: 'Status message (delete operation)' },
|
||||
mailingLists: {
|
||||
type: 'json',
|
||||
description: 'Array of mailing lists (list mailing lists operation)',
|
||||
description: 'Array of mailing lists (id, name, description, isPublic)',
|
||||
},
|
||||
transactionalEmails: {
|
||||
type: 'json',
|
||||
description: 'Array of transactional email templates (list transactional emails operation)',
|
||||
description: 'Array of transactional email templates (id, name, lastUpdated, dataVariables)',
|
||||
},
|
||||
pagination: {
|
||||
type: 'json',
|
||||
description: 'Pagination info (list transactional emails operation)',
|
||||
description:
|
||||
'Pagination info (totalResults, returnedResults, perPage, totalPages, nextCursor, nextPage)',
|
||||
},
|
||||
properties: {
|
||||
type: 'json',
|
||||
description: 'Array of contact properties (list contact properties operation)',
|
||||
description: 'Array of contact properties (key, label, type)',
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
482
apps/sim/blocks/blocks/pagerduty.ts
Normal file
482
apps/sim/blocks/blocks/pagerduty.ts
Normal file
@@ -0,0 +1,482 @@
|
||||
import { PagerDutyIcon } from '@/components/icons'
|
||||
import { AuthMode, type BlockConfig } from '@/blocks/types'
|
||||
|
||||
export const PagerDutyBlock: BlockConfig = {
|
||||
type: 'pagerduty',
|
||||
name: 'PagerDuty',
|
||||
description: 'Manage incidents and on-call schedules with PagerDuty',
|
||||
longDescription:
|
||||
'Integrate PagerDuty into your workflow to list, create, and update incidents, add notes, list services, and check on-call schedules.',
|
||||
docsLink: 'https://docs.sim.ai/tools/pagerduty',
|
||||
category: 'tools',
|
||||
bgColor: '#06AC38',
|
||||
icon: PagerDutyIcon,
|
||||
authMode: AuthMode.ApiKey,
|
||||
|
||||
subBlocks: [
|
||||
{
|
||||
id: 'operation',
|
||||
title: 'Operation',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'List Incidents', id: 'list_incidents' },
|
||||
{ label: 'Create Incident', id: 'create_incident' },
|
||||
{ label: 'Update Incident', id: 'update_incident' },
|
||||
{ label: 'Add Note', id: 'add_note' },
|
||||
{ label: 'List Services', id: 'list_services' },
|
||||
{ label: 'List On-Calls', id: 'list_oncalls' },
|
||||
],
|
||||
value: () => 'list_incidents',
|
||||
},
|
||||
|
||||
{
|
||||
id: 'apiKey',
|
||||
title: 'API Key',
|
||||
type: 'short-input',
|
||||
required: true,
|
||||
placeholder: 'Enter your PagerDuty REST API Key',
|
||||
password: true,
|
||||
},
|
||||
|
||||
{
|
||||
id: 'fromEmail',
|
||||
title: 'From Email',
|
||||
type: 'short-input',
|
||||
required: {
|
||||
field: 'operation',
|
||||
value: ['create_incident', 'update_incident', 'add_note'],
|
||||
},
|
||||
placeholder: 'Valid PagerDuty user email (required for write operations)',
|
||||
condition: {
|
||||
field: 'operation',
|
||||
value: ['create_incident', 'update_incident', 'add_note'],
|
||||
},
|
||||
},
|
||||
|
||||
// --- List Incidents fields ---
|
||||
{
|
||||
id: 'statuses',
|
||||
title: 'Statuses',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'All', id: '' },
|
||||
{ label: 'Triggered', id: 'triggered' },
|
||||
{ label: 'Acknowledged', id: 'acknowledged' },
|
||||
{ label: 'Resolved', id: 'resolved' },
|
||||
],
|
||||
value: () => '',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
},
|
||||
{
|
||||
id: 'listServiceIds',
|
||||
title: 'Service IDs',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated service IDs to filter',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'listSince',
|
||||
title: 'Since',
|
||||
type: 'short-input',
|
||||
placeholder: 'Start date (ISO 8601, e.g., 2024-01-01T00:00:00Z)',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate an ISO 8601 timestamp. Return ONLY the timestamp string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'listUntil',
|
||||
title: 'Until',
|
||||
type: 'short-input',
|
||||
placeholder: 'End date (ISO 8601, e.g., 2024-12-31T23:59:59Z)',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate an ISO 8601 timestamp. Return ONLY the timestamp string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'listSortBy',
|
||||
title: 'Sort By',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'Created At (newest)', id: 'created_at:desc' },
|
||||
{ label: 'Created At (oldest)', id: 'created_at:asc' },
|
||||
],
|
||||
value: () => 'created_at:desc',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'listLimit',
|
||||
title: 'Limit',
|
||||
type: 'short-input',
|
||||
placeholder: '25',
|
||||
condition: { field: 'operation', value: 'list_incidents' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Create Incident fields ---
|
||||
{
|
||||
id: 'title',
|
||||
title: 'Title',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'create_incident' },
|
||||
placeholder: 'Incident title/summary',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
},
|
||||
{
|
||||
id: 'createServiceId',
|
||||
title: 'Service ID',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'create_incident' },
|
||||
placeholder: 'PagerDuty service ID',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
},
|
||||
{
|
||||
id: 'createUrgency',
|
||||
title: 'Urgency',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'High', id: 'high' },
|
||||
{ label: 'Low', id: 'low' },
|
||||
],
|
||||
value: () => 'high',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
},
|
||||
{
|
||||
id: 'body',
|
||||
title: 'Description',
|
||||
type: 'long-input',
|
||||
placeholder: 'Detailed description of the incident',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
},
|
||||
{
|
||||
id: 'escalationPolicyId',
|
||||
title: 'Escalation Policy ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'Escalation policy ID (optional)',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'assigneeId',
|
||||
title: 'Assignee User ID',
|
||||
type: 'short-input',
|
||||
placeholder: 'User ID to assign (optional)',
|
||||
condition: { field: 'operation', value: 'create_incident' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- Update Incident fields ---
|
||||
{
|
||||
id: 'updateIncidentId',
|
||||
title: 'Incident ID',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'update_incident' },
|
||||
placeholder: 'ID of the incident to update',
|
||||
condition: { field: 'operation', value: 'update_incident' },
|
||||
},
|
||||
{
|
||||
id: 'updateStatus',
|
||||
title: 'Status',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'No Change', id: '' },
|
||||
{ label: 'Acknowledged', id: 'acknowledged' },
|
||||
{ label: 'Resolved', id: 'resolved' },
|
||||
],
|
||||
value: () => '',
|
||||
condition: { field: 'operation', value: 'update_incident' },
|
||||
},
|
||||
{
|
||||
id: 'updateTitle',
|
||||
title: 'New Title',
|
||||
type: 'short-input',
|
||||
placeholder: 'New incident title (optional)',
|
||||
condition: { field: 'operation', value: 'update_incident' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'updateUrgency',
|
||||
title: 'Urgency',
|
||||
type: 'dropdown',
|
||||
options: [
|
||||
{ label: 'No Change', id: '' },
|
||||
{ label: 'High', id: 'high' },
|
||||
{ label: 'Low', id: 'low' },
|
||||
],
|
||||
value: () => '',
|
||||
condition: { field: 'operation', value: 'update_incident' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'updateEscalationLevel',
|
||||
title: 'Escalation Level',
|
||||
type: 'short-input',
|
||||
placeholder: 'Escalation level number (e.g., 2)',
|
||||
condition: { field: 'operation', value: 'update_incident' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
// --- Add Note fields ---
|
||||
{
|
||||
id: 'noteIncidentId',
|
||||
title: 'Incident ID',
|
||||
type: 'short-input',
|
||||
required: { field: 'operation', value: 'add_note' },
|
||||
placeholder: 'ID of the incident',
|
||||
condition: { field: 'operation', value: 'add_note' },
|
||||
},
|
||||
{
|
||||
id: 'noteContent',
|
||||
title: 'Note Content',
|
||||
type: 'long-input',
|
||||
required: { field: 'operation', value: 'add_note' },
|
||||
placeholder: 'Note text to add to the incident',
|
||||
condition: { field: 'operation', value: 'add_note' },
|
||||
},
|
||||
|
||||
// --- List Services fields ---
|
||||
{
|
||||
id: 'serviceQuery',
|
||||
title: 'Search Query',
|
||||
type: 'short-input',
|
||||
placeholder: 'Filter services by name',
|
||||
condition: { field: 'operation', value: 'list_services' },
|
||||
},
|
||||
{
|
||||
id: 'serviceLimit',
|
||||
title: 'Limit',
|
||||
type: 'short-input',
|
||||
placeholder: '25',
|
||||
condition: { field: 'operation', value: 'list_services' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
|
||||
// --- List On-Calls fields ---
|
||||
{
|
||||
id: 'oncallEscalationPolicyIds',
|
||||
title: 'Escalation Policy IDs',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated escalation policy IDs',
|
||||
condition: { field: 'operation', value: 'list_oncalls' },
|
||||
},
|
||||
{
|
||||
id: 'oncallScheduleIds',
|
||||
title: 'Schedule IDs',
|
||||
type: 'short-input',
|
||||
placeholder: 'Comma-separated schedule IDs',
|
||||
condition: { field: 'operation', value: 'list_oncalls' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'oncallLimit',
|
||||
title: 'Limit',
|
||||
type: 'short-input',
|
||||
placeholder: '25',
|
||||
condition: { field: 'operation', value: 'list_oncalls' },
|
||||
mode: 'advanced',
|
||||
},
|
||||
{
|
||||
id: 'oncallSince',
|
||||
title: 'Since',
|
||||
type: 'short-input',
|
||||
placeholder: 'Start time (ISO 8601)',
|
||||
condition: { field: 'operation', value: 'list_oncalls' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate an ISO 8601 timestamp. Return ONLY the timestamp string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'oncallUntil',
|
||||
title: 'Until',
|
||||
type: 'short-input',
|
||||
placeholder: 'End time (ISO 8601)',
|
||||
condition: { field: 'operation', value: 'list_oncalls' },
|
||||
mode: 'advanced',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
prompt:
|
||||
'Generate an ISO 8601 timestamp. Return ONLY the timestamp string - no explanations, no extra text.',
|
||||
generationType: 'timestamp',
|
||||
},
|
||||
},
|
||||
],
|
||||
|
||||
tools: {
|
||||
access: [
|
||||
'pagerduty_list_incidents',
|
||||
'pagerduty_create_incident',
|
||||
'pagerduty_update_incident',
|
||||
'pagerduty_add_note',
|
||||
'pagerduty_list_services',
|
||||
'pagerduty_list_oncalls',
|
||||
],
|
||||
config: {
|
||||
tool: (params) => `pagerduty_${params.operation}`,
|
||||
params: (params) => {
|
||||
const result: Record<string, unknown> = {}
|
||||
|
||||
switch (params.operation) {
|
||||
case 'list_incidents':
|
||||
if (params.statuses) result.statuses = params.statuses
|
||||
if (params.listServiceIds) result.serviceIds = params.listServiceIds
|
||||
if (params.listSince) result.since = params.listSince
|
||||
if (params.listUntil) result.until = params.listUntil
|
||||
if (params.listSortBy) result.sortBy = params.listSortBy
|
||||
if (params.listLimit) result.limit = params.listLimit
|
||||
break
|
||||
|
||||
case 'create_incident':
|
||||
if (params.createServiceId) result.serviceId = params.createServiceId
|
||||
if (params.createUrgency) result.urgency = params.createUrgency
|
||||
break
|
||||
|
||||
case 'update_incident':
|
||||
if (params.updateIncidentId) result.incidentId = params.updateIncidentId
|
||||
if (params.updateStatus) result.status = params.updateStatus
|
||||
if (params.updateTitle) result.title = params.updateTitle
|
||||
if (params.updateUrgency) result.urgency = params.updateUrgency
|
||||
if (params.updateEscalationLevel) result.escalationLevel = params.updateEscalationLevel
|
||||
break
|
||||
|
||||
case 'add_note':
|
||||
if (params.noteIncidentId) result.incidentId = params.noteIncidentId
|
||||
if (params.noteContent) result.content = params.noteContent
|
||||
break
|
||||
|
||||
case 'list_services':
|
||||
if (params.serviceQuery) result.query = params.serviceQuery
|
||||
if (params.serviceLimit) result.limit = params.serviceLimit
|
||||
break
|
||||
|
||||
case 'list_oncalls':
|
||||
if (params.oncallEscalationPolicyIds)
|
||||
result.escalationPolicyIds = params.oncallEscalationPolicyIds
|
||||
if (params.oncallScheduleIds) result.scheduleIds = params.oncallScheduleIds
|
||||
if (params.oncallSince) result.since = params.oncallSince
|
||||
if (params.oncallUntil) result.until = params.oncallUntil
|
||||
if (params.oncallLimit) result.limit = params.oncallLimit
|
||||
break
|
||||
}
|
||||
|
||||
return result
|
||||
},
|
||||
},
|
||||
},
|
||||
|
||||
inputs: {
|
||||
operation: { type: 'string', description: 'Operation to perform' },
|
||||
apiKey: { type: 'string', description: 'PagerDuty REST API Key' },
|
||||
fromEmail: { type: 'string', description: 'Valid PagerDuty user email' },
|
||||
statuses: { type: 'string', description: 'Status filter for incidents' },
|
||||
listServiceIds: { type: 'string', description: 'Service IDs filter' },
|
||||
listSince: { type: 'string', description: 'Start date filter' },
|
||||
listUntil: { type: 'string', description: 'End date filter' },
|
||||
title: { type: 'string', description: 'Incident title' },
|
||||
createServiceId: { type: 'string', description: 'Service ID for new incident' },
|
||||
createUrgency: { type: 'string', description: 'Urgency level' },
|
||||
body: { type: 'string', description: 'Incident description' },
|
||||
updateIncidentId: { type: 'string', description: 'Incident ID to update' },
|
||||
updateStatus: { type: 'string', description: 'New status' },
|
||||
noteIncidentId: { type: 'string', description: 'Incident ID for note' },
|
||||
noteContent: { type: 'string', description: 'Note content' },
|
||||
escalationPolicyId: { type: 'string', description: 'Escalation policy ID' },
|
||||
assigneeId: { type: 'string', description: 'Assignee user ID' },
|
||||
updateTitle: { type: 'string', description: 'New incident title' },
|
||||
updateUrgency: { type: 'string', description: 'New urgency level' },
|
||||
updateEscalationLevel: { type: 'string', description: 'Escalation level number' },
|
||||
listSortBy: { type: 'string', description: 'Sort field' },
|
||||
listLimit: { type: 'string', description: 'Max results for incidents' },
|
||||
serviceQuery: { type: 'string', description: 'Service name filter' },
|
||||
serviceLimit: { type: 'string', description: 'Max results for services' },
|
||||
oncallEscalationPolicyIds: { type: 'string', description: 'Escalation policy IDs filter' },
|
||||
oncallScheduleIds: { type: 'string', description: 'Schedule IDs filter' },
|
||||
oncallSince: { type: 'string', description: 'On-call start time filter' },
|
||||
oncallUntil: { type: 'string', description: 'On-call end time filter' },
|
||||
oncallLimit: { type: 'string', description: 'Max results for on-calls' },
|
||||
},
|
||||
|
||||
outputs: {
|
||||
incidents: {
|
||||
type: 'json',
|
||||
description: 'Array of incidents (list_incidents)',
|
||||
},
|
||||
total: {
|
||||
type: 'number',
|
||||
description: 'Total count of results',
|
||||
},
|
||||
more: {
|
||||
type: 'boolean',
|
||||
description: 'Whether more results are available',
|
||||
},
|
||||
id: {
|
||||
type: 'string',
|
||||
description: 'Created/updated resource ID',
|
||||
},
|
||||
incidentNumber: {
|
||||
type: 'number',
|
||||
description: 'Incident number',
|
||||
},
|
||||
title: {
|
||||
type: 'string',
|
||||
description: 'Incident title',
|
||||
},
|
||||
status: {
|
||||
type: 'string',
|
||||
description: 'Incident status',
|
||||
},
|
||||
urgency: {
|
||||
type: 'string',
|
||||
description: 'Incident urgency',
|
||||
},
|
||||
createdAt: {
|
||||
type: 'string',
|
||||
description: 'Creation timestamp',
|
||||
},
|
||||
updatedAt: {
|
||||
type: 'string',
|
||||
description: 'Last updated timestamp',
|
||||
},
|
||||
serviceName: {
|
||||
type: 'string',
|
||||
description: 'Service name',
|
||||
},
|
||||
serviceId: {
|
||||
type: 'string',
|
||||
description: 'Service ID',
|
||||
},
|
||||
htmlUrl: {
|
||||
type: 'string',
|
||||
description: 'PagerDuty web URL',
|
||||
},
|
||||
content: {
|
||||
type: 'string',
|
||||
description: 'Note content (add_note)',
|
||||
},
|
||||
userName: {
|
||||
type: 'string',
|
||||
description: 'User name (add_note)',
|
||||
},
|
||||
services: {
|
||||
type: 'json',
|
||||
description: 'Array of services (list_services)',
|
||||
},
|
||||
oncalls: {
|
||||
type: 'json',
|
||||
description: 'Array of on-call entries (list_oncalls)',
|
||||
},
|
||||
},
|
||||
}
|
||||
@@ -4,6 +4,7 @@ import { AhrefsBlock } from '@/blocks/blocks/ahrefs'
|
||||
import { AirtableBlock } from '@/blocks/blocks/airtable'
|
||||
import { AirweaveBlock } from '@/blocks/blocks/airweave'
|
||||
import { AlgoliaBlock } from '@/blocks/blocks/algolia'
|
||||
import { AmplitudeBlock } from '@/blocks/blocks/amplitude'
|
||||
import { ApiBlock } from '@/blocks/blocks/api'
|
||||
import { ApiTriggerBlock } from '@/blocks/blocks/api_trigger'
|
||||
import { ApifyBlock } from '@/blocks/blocks/apify'
|
||||
@@ -56,6 +57,7 @@ import { GoogleDriveBlock } from '@/blocks/blocks/google_drive'
|
||||
import { GoogleFormsBlock } from '@/blocks/blocks/google_forms'
|
||||
import { GoogleGroupsBlock } from '@/blocks/blocks/google_groups'
|
||||
import { GoogleMapsBlock } from '@/blocks/blocks/google_maps'
|
||||
import { GooglePagespeedBlock } from '@/blocks/blocks/google_pagespeed'
|
||||
import { GoogleSheetsBlock, GoogleSheetsV2Block } from '@/blocks/blocks/google_sheets'
|
||||
import { GoogleSlidesBlock, GoogleSlidesV2Block } from '@/blocks/blocks/google_slides'
|
||||
import { GoogleTasksBlock } from '@/blocks/blocks/google_tasks'
|
||||
@@ -112,6 +114,7 @@ import { OneDriveBlock } from '@/blocks/blocks/onedrive'
|
||||
import { OnePasswordBlock } from '@/blocks/blocks/onepassword'
|
||||
import { OpenAIBlock } from '@/blocks/blocks/openai'
|
||||
import { OutlookBlock } from '@/blocks/blocks/outlook'
|
||||
import { PagerDutyBlock } from '@/blocks/blocks/pagerduty'
|
||||
import { ParallelBlock } from '@/blocks/blocks/parallel'
|
||||
import { PerplexityBlock } from '@/blocks/blocks/perplexity'
|
||||
import { PineconeBlock } from '@/blocks/blocks/pinecone'
|
||||
@@ -193,6 +196,7 @@ export const registry: Record<string, BlockConfig> = {
|
||||
airtable: AirtableBlock,
|
||||
airweave: AirweaveBlock,
|
||||
algolia: AlgoliaBlock,
|
||||
amplitude: AmplitudeBlock,
|
||||
api: ApiBlock,
|
||||
api_trigger: ApiTriggerBlock,
|
||||
apify: ApifyBlock,
|
||||
@@ -250,6 +254,7 @@ export const registry: Record<string, BlockConfig> = {
|
||||
google_forms: GoogleFormsBlock,
|
||||
google_groups: GoogleGroupsBlock,
|
||||
google_maps: GoogleMapsBlock,
|
||||
google_pagespeed: GooglePagespeedBlock,
|
||||
google_tasks: GoogleTasksBlock,
|
||||
google_translate: GoogleTranslateBlock,
|
||||
gong: GongBlock,
|
||||
@@ -313,6 +318,7 @@ export const registry: Record<string, BlockConfig> = {
|
||||
onedrive: OneDriveBlock,
|
||||
openai: OpenAIBlock,
|
||||
outlook: OutlookBlock,
|
||||
pagerduty: PagerDutyBlock,
|
||||
parallel_ai: ParallelBlock,
|
||||
perplexity: PerplexityBlock,
|
||||
pinecone: PineconeBlock,
|
||||
|
||||
@@ -1209,6 +1209,17 @@ export function AlgoliaIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function AmplitudeIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 49 49'>
|
||||
<path
|
||||
fill='#FFFFFF'
|
||||
d='M23.4,15.3c0.6,1.8,1.2,4.1,1.9,6.7c-2.6,0-5.3-0.1-7.8-0.1h-1.3c1.5-5.7,3.2-10.1,4.6-11.1 c0.1-0.1,0.2-0.1,0.4-0.1c0.2,0,0.3,0.1,0.5,0.3C21.9,11.5,22.5,12.7,23.4,15.3z M49,24.5C49,38,38,49,24.5,49S0,38,0,24.5 S11,0,24.5,0S49,11,49,24.5z M42.7,23.9c0-0.6-0.4-1.2-1-1.3l0,0l0,0l0,0c-0.1,0-0.1,0-0.2,0h-0.2c-4.1-0.3-8.4-0.4-12.4-0.5l0,0 C27,14.8,24.5,7.4,21.3,7.4c-3,0-5.8,4.9-8.2,14.5c-1.7,0-3.2,0-4.6-0.1c-0.1,0-0.2,0-0.2,0c-0.3,0-0.5,0-0.5,0 c-0.8,0.1-1.4,0.9-1.4,1.7c0,0.8,0.6,1.6,1.5,1.7l0,0h4.6c-0.4,1.9-0.8,3.8-1.1,5.6l-0.1,0.8l0,0c0,0.6,0.5,1.1,1.1,1.1 c0.4,0,0.8-0.2,1-0.5l0,0l2.2-7.1h10.7c0.8,3.1,1.7,6.3,2.8,9.3c0.6,1.6,2,5.4,4.4,5.4l0,0c3.6,0,5-5.8,5.9-9.6 c0.2-0.8,0.4-1.5,0.5-2.1l0.1-0.2l0,0c0-0.1,0-0.2,0-0.3c-0.1-0.2-0.2-0.3-0.4-0.4c-0.3-0.1-0.5,0.1-0.6,0.4l0,0l-0.1,0.2 c-0.3,0.8-0.6,1.6-0.8,2.3v0.1c-1.6,4.4-2.3,6.4-3.7,6.4l0,0l0,0l0,0c-1.8,0-3.5-7.3-4.1-10.1c-0.1-0.5-0.2-0.9-0.3-1.3h11.7 c0.2,0,0.4-0.1,0.6-0.1l0,0c0,0,0,0,0.1,0c0,0,0,0,0.1,0l0,0c0,0,0.1,0,0.1-0.1l0,0C42.5,24.6,42.7,24.3,42.7,23.9z'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleBooksIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 478.633 540.068'>
|
||||
@@ -1938,13 +1949,11 @@ export function ElevenLabsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
|
||||
export function LinkupIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 24 24'>
|
||||
<g transform='translate(12, 12) scale(1.3) translate(-12, -12)'>
|
||||
<path
|
||||
d='M20.2 14.1c-.4-.3-1.6-.4-2.9-.2.5-1.4 1.3-3.9.1-5-.6-.5-1.5-.7-2.6-.5-.3 0-.6.1-1 .2-1.1-1.6-2.4-2.5-3.8-2.5-1.6 0-3.1 1-4.1 2.9-1.2 2.1-1.9 5.1-1.9 8.8v.03l.4.3c3-.9 7.5-2.3 10.7-2.9 0 .9.1 1.9.1 2.8v.03l.4.3c.1 0 5.4-1.7 5.3-3.3 0-.2-.1-.5-.3-.7zM19.9 14.7c.03.4-1.7 1.4-4 2.3.5-.7 1-1.6 1.3-2.5 1.4-.1 2.4-.1 2.7.2zM16.4 14.6c-.3.7-.7 1.4-1.2 2-.02-.6-.1-1.2-.2-1.8.4-.1.9-.1 1.4-.2zM16.5 9.4c.8.7.9 2.4.1 5.1-.5.1-1 .1-1.5.2-.3-2-.9-3.8-1.7-5.3.3-.1.6-.2.8-.2.9-.1 1.7.05 2.3.2zM9.5 6.8c1.2 0 2.3.7 3.2 2.1-2.8 1.1-5.9 3.4-8.4 7.8.2-5.1 1.9-9.9 5.2-9.9zM4.7 17c3.4-4.9 6.4-6.8 8.4-7.8.7 1.3 1.2 2.9 1.5 4.8-3.2.6-7.3 1.8-9.9 3z'
|
||||
fill='#000000'
|
||||
/>
|
||||
</g>
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 154 107' fill='none'>
|
||||
<path
|
||||
d='M150.677 72.7113C146.612 70.2493 137.909 69.542 124.794 70.6076C128.992 57.6776 133.757 35.3911 121.323 25.1527C115.886 20.6743 107.471 19.0437 97.6162 20.5594C94.6758 21.0142 91.5752 21.7445 88.3878 22.732C78.8667 8.28165 66.2954 0 53.8613 0C39.4288 0 26.1304 9.3381 16.4081 26.2872C5.67515 45.014 0 71.9626 0 104.23V104.533L3.60356 106.94L3.88251 106.825C30.5754 95.5628 67.5759 85.0718 100.593 79.4037C101.604 87.644 102.116 95.9945 102.116 104.235V104.52L105.491 107L105.761 106.913C106.255 106.752 155.159 90.8822 153.979 77.5894C153.856 76.2022 153.183 74.2271 150.677 72.7113ZM148.409 78.09C148.715 81.5442 133.236 91.0568 111.838 98.8883C115.968 92.0995 119.818 84.1715 122.777 76.3584C135.659 75.1411 144.531 75.5545 147.792 77.5296C148.377 77.8833 148.409 78.09 148.409 78.09ZM116.668 77.0106C114.084 83.3769 110.951 89.6329 107.54 95.2458C107.334 89.5135 106.913 83.8821 106.296 78.4621C109.922 77.8971 113.407 77.4102 116.668 77.0106ZM117.774 29.4979C125.379 35.7585 125.782 51.3205 118.867 71.1772C114.747 71.6319 110.284 72.2382 105.596 72.9777C103.049 55.1742 98.2839 39.966 91.4243 27.7525C94.566 26.8155 96.9669 26.3469 98.4622 26.1127C106.721 24.8404 113.581 26.0438 117.774 29.4979ZM53.8567 5.62215C65.0561 5.62215 74.8882 12.0022 83.0922 24.5923C57.7027 34.5413 30.3193 59.4092 5.78032 94.8003C7.43119 51.4813 23.0299 5.62215 53.8613 5.62215M10.1933 98.2406C40.7504 53.9341 68.2024 36.4429 86.0739 29.5852C92.4487 41.2383 97.2046 56.5522 99.8433 73.9331C70.5209 79.0316 35.6377 88.4983 10.1933 98.2406Z'
|
||||
fill='#000000'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
@@ -2453,6 +2462,17 @@ export function OutlookIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function PagerDutyIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 64 64' fill='none'>
|
||||
<path
|
||||
d='M6.704 59.217H0v-33.65c0-3.455 1.418-5.544 2.604-6.704 2.63-2.58 6.2-2.656 6.782-2.656h10.546c3.765 0 5.93 1.52 7.117 2.8 2.346 2.553 2.372 5.853 2.32 6.73v12.687c0 3.662-1.496 5.828-2.733 6.988-2.553 2.398-5.93 2.45-6.73 2.424H6.704zm13.46-18.102c.36 0 1.367-.103 1.908-.62.413-.387.62-1.083.62-2.1v-13.02c0-.36-.077-1.315-.593-1.857-.5-.516-1.444-.62-2.166-.62h-10.6c-2.63 0-2.63 1.985-2.63 2.656v15.55zM57.296 4.783H64V38.46c0 3.455-1.418 5.544-2.604 6.704-2.63 2.58-6.2 2.656-6.782 2.656H44.068c-3.765 0-5.93-1.52-7.117-2.8-2.346-2.553-2.372-5.853-2.32-6.73V25.62c0-3.662 1.496-5.828 2.733-6.988 2.553-2.398 5.93-2.45 6.73-2.424h13.202zM43.836 22.9c-.36 0-1.367.103-1.908.62-.413.387-.62 1.083-.62 2.1v13.02c0 .36.077 1.315.593 1.857.5.516 1.444.62 2.166.62h10.598c2.656-.026 2.656-2 2.656-2.682V22.9z'
|
||||
fill='#06AC38'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function MicrosoftExcelIcon(props: SVGProps<SVGSVGElement>) {
|
||||
const id = useId()
|
||||
const gradientId = `excel_gradient_${id}`
|
||||
@@ -3996,10 +4016,10 @@ export function IntercomIcon(props: SVGProps<SVGSVGElement>) {
|
||||
|
||||
export function LoopsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='0 0 256 256' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<svg {...props} viewBox='0 0 214 186' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
fill='currentColor'
|
||||
d='M192.352 88.042c0-7.012-5.685-12.697-12.697-12.697s-12.697 5.685-12.697 12.697c0 .634.052 1.255.142 1.866a25.248 25.248 0 0 0-4.9-.49c-14.006 0-25.36 11.354-25.36 25.36 0 1.63.16 3.222.456 4.765a37.8 37.8 0 0 0-9.296-1.173c-20.95 0-37.935 16.985-37.935 37.935S107.05 194.24 128 194.24s37.935-16.985 37.935-37.935a37.7 37.7 0 0 0-3.78-16.555 25.2 25.2 0 0 0 12.487-3.336 25.2 25.2 0 0 0 4.558 3.336v.02c14.006 0 25.36-11.354 25.36-25.36 0-12.48-9.018-22.855-20.888-24.996a12.6 12.6 0 0 0 8.68-11.972m-77.05 68.263c0-7.012 5.685-12.697 12.697-12.697s12.697 5.685 12.697 12.697c0 7.013-5.685 12.697-12.697 12.697s-12.697-5.685-12.697-12.697'
|
||||
d='M122.19,0 H90.27 C40.51,0 0,39.88 0,92.95 C0,141.07 38.93,183.77 90.27,183.77 H122.19 C172.61,183.77 213.31,142.82 213.31,92.95 C213.31,43.29 173.09,0 122.19,0 Z M10.82,92.54 C10.82,50.19 45.91,11.49 91.96,11.49 C138.73,11.49 172.69,50.33 172.69,92.13 C172.69,117.76 154.06,139.09 129.02,143.31 C145.16,131.15 155.48,112.73 155.48,92.4 C155.48,59.09 127.44,28.82 92.37,28.82 C57.23,28.82 28.51,57.23 28.51,92.91 C28.51,122.63 43.61,151.08 69.99,168.21 L71.74,169.33 C35.99,161.39 10.82,130.11 10.82,92.54 Z M106.33,42.76 C128.88,50.19 143.91,68.92 143.91,92.26 C143.91,114.23 128.68,134.63 106.12,141.71 C105.44,141.96 105.17,141.96 105.17,141.96 C83.91,135.76 69.29,116.38 69.29,92.71 C69.29,69.91 83.71,50.33 106.33,42.76 Z M120.91,172.13 C76.11,172.13 40.09,137.21 40.09,93.32 C40.09,67.03 57.17,46.11 83.98,41.33 C67.04,53.83 57.3,71.71 57.3,92.71 C57.3,125.75 82.94,155.33 120.77,155.33 C155.01,155.33 184.31,125.2 184.31,92.47 C184.31,62.34 169.96,34.06 141.92,14.55 L141.65,14.34 C175.81,23.68 202.26,54.11 202.26,92.81 C202.26,135.69 166.38,172.13 120.91,172.13 Z'
|
||||
fill='#FB5001'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
@@ -4542,7 +4562,7 @@ export function DatabricksIcon(props: SVGProps<SVGSVGElement>) {
|
||||
<svg {...props} viewBox='0 0 241 266' fill='none' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M228.085 109.654L120.615 171.674L5.53493 105.41L0 108.475V156.582L120.615 225.911L228.085 164.128V189.596L120.615 251.615L5.53493 185.351L0 188.417V196.67L120.615 266L241 196.67V148.564L235.465 145.498L120.615 211.527L12.9148 149.743V124.275L120.615 186.059L241 116.729V69.3298L235.004 65.7925L120.615 131.585L18.4498 73.1028L120.615 14.3848L204.562 62.7269L211.942 58.4823V52.5869L120.615 0L0 69.3298V76.8759L120.615 146.206L228.085 84.1862V109.654Z'
|
||||
fill='#F9F7F4'
|
||||
fill='#FF3621'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
@@ -5578,6 +5598,35 @@ export function GoogleMapsIcon(props: SVGProps<SVGSVGElement>) {
|
||||
)
|
||||
}
|
||||
|
||||
export function GooglePagespeedIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} viewBox='-1.74 -1.81 285.55 266.85' xmlns='http://www.w3.org/2000/svg'>
|
||||
<path
|
||||
d='M272.73 37.23v179.68a18.58 18.58 0 0 1-18.57 18.59H18.65A18.58 18.58 0 0 1 .06 216.94V37.23z'
|
||||
fill='#e1e1e1'
|
||||
/>
|
||||
<path
|
||||
d='M18.65 0h235.5a18.58 18.58 0 0 1 18.58 18.56v18.67H.07V18.59A18.58 18.58 0 0 1 18.64 0z'
|
||||
fill='#c2c2c2'
|
||||
/>
|
||||
<path
|
||||
d='M136.3 92.96a99 99 0 0 0-99 99v.13c0 2.08-.12 4.64 0 6.2h43.25a54.87 54.87 0 0 1 0-6.2 55.81 55.81 0 0 1 85.06-47.45l31.12-31.12a98.76 98.76 0 0 0-60.44-20.57z'
|
||||
fill='#4285f4'
|
||||
/>
|
||||
<path
|
||||
d='M196.73 113.46l-31.14 31.14a55.74 55.74 0 0 1 26.56 47.45 54.87 54.87 0 0 1 0 6.2h43.39c.12-1.48 0-4.12 0-6.2a99 99 0 0 0-38.81-78.59z'
|
||||
fill='#f44336'
|
||||
/>
|
||||
<circle cx='24.85' cy='18.59' fill='#eee' r='6.2' />
|
||||
<circle cx='49.65' cy='18.59' fill='#eee' r='6.2' />
|
||||
<path
|
||||
d='M197.01 117.23a3.05 3.05 0 0 0 .59-1.81 3.11 3.11 0 0 0-3.1-3.1 3 3 0 0 0-1.91.68l-67.56 52a18.58 18.58 0 1 0 27.24 24.33l44.73-72.1z'
|
||||
fill='#9e9e9e'
|
||||
/>
|
||||
</svg>
|
||||
)
|
||||
}
|
||||
|
||||
export function GoogleTranslateIcon(props: SVGProps<SVGSVGElement>) {
|
||||
return (
|
||||
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 998.1 998.3'>
|
||||
|
||||
@@ -1,3 +1,4 @@
|
||||
import { useCallback } from 'react'
|
||||
import { createLogger } from '@sim/logger'
|
||||
import { useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
|
||||
import type { WorkflowDeploymentVersionResponse } from '@/lib/workflows/persistence/utils'
|
||||
@@ -209,6 +210,13 @@ export function useChatDeploymentInfo(workflowId: string | null, options?: { ena
|
||||
enabled: Boolean(chatId) && statusQuery.isSuccess && (options?.enabled ?? true),
|
||||
})
|
||||
|
||||
const refetch = useCallback(async () => {
|
||||
const statusResult = await statusQuery.refetch()
|
||||
if (statusResult.data?.deployment?.id) {
|
||||
await detailQuery.refetch()
|
||||
}
|
||||
}, [statusQuery.refetch, detailQuery.refetch])
|
||||
|
||||
return {
|
||||
isLoading:
|
||||
statusQuery.isLoading || Boolean(statusQuery.data?.isDeployed && detailQuery.isLoading),
|
||||
@@ -216,12 +224,7 @@ export function useChatDeploymentInfo(workflowId: string | null, options?: { ena
|
||||
error: statusQuery.error ?? detailQuery.error,
|
||||
chatExists: statusQuery.data?.isDeployed ?? false,
|
||||
existingChat: detailQuery.data ?? null,
|
||||
refetch: async () => {
|
||||
await statusQuery.refetch()
|
||||
if (statusQuery.data?.deployment?.id) {
|
||||
await detailQuery.refetch()
|
||||
}
|
||||
},
|
||||
refetch,
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
@@ -12,7 +12,7 @@ import {
|
||||
getActiveSSEConnectionsByRoute,
|
||||
} from '@/lib/monitoring/sse-connections'
|
||||
|
||||
const logger = createLogger('MemoryTelemetry')
|
||||
const logger = createLogger('MemoryTelemetry', { logLevel: 'INFO' })
|
||||
|
||||
const MB = 1024 * 1024
|
||||
|
||||
|
||||
134
apps/sim/tools/amplitude/event_segmentation.ts
Normal file
134
apps/sim/tools/amplitude/event_segmentation.ts
Normal file
@@ -0,0 +1,134 @@
|
||||
import type {
|
||||
AmplitudeEventSegmentationParams,
|
||||
AmplitudeEventSegmentationResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const eventSegmentationTool: ToolConfig<
|
||||
AmplitudeEventSegmentationParams,
|
||||
AmplitudeEventSegmentationResponse
|
||||
> = {
|
||||
id: 'amplitude_event_segmentation',
|
||||
name: 'Amplitude Event Segmentation',
|
||||
description:
|
||||
'Query event analytics data with segmentation. Get event counts, uniques, averages, and more.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
eventType: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Event type name to analyze',
|
||||
},
|
||||
start: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Start date in YYYYMMDD format',
|
||||
},
|
||||
end: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'End date in YYYYMMDD format',
|
||||
},
|
||||
metric: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'Metric type: uniques, totals, pct_dau, average, histogram, sums, value_avg, or formula (default: uniques)',
|
||||
},
|
||||
interval: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Time interval: 1 (daily), 7 (weekly), or 30 (monthly)',
|
||||
},
|
||||
groupBy: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Property name to group by (prefix custom user properties with "gp:")',
|
||||
},
|
||||
limit: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Maximum number of group-by values (max 1000)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://amplitude.com/api/2/events/segmentation')
|
||||
const eventObj = JSON.stringify({ event_type: params.eventType })
|
||||
url.searchParams.set('e', eventObj)
|
||||
url.searchParams.set('start', params.start)
|
||||
url.searchParams.set('end', params.end)
|
||||
if (params.metric) url.searchParams.set('m', params.metric)
|
||||
if (params.interval) url.searchParams.set('i', params.interval)
|
||||
if (params.groupBy) url.searchParams.set('g', params.groupBy)
|
||||
if (params.limit) url.searchParams.set('limit', params.limit)
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude Event Segmentation API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const result = data.data ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
series: result.series ?? [],
|
||||
seriesLabels: result.seriesLabels ?? [],
|
||||
seriesCollapsed: result.seriesCollapsed ?? [],
|
||||
xValues: result.xValues ?? [],
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
series: {
|
||||
type: 'json',
|
||||
description: 'Time-series data arrays indexed by series',
|
||||
},
|
||||
seriesLabels: {
|
||||
type: 'array',
|
||||
description: 'Labels for each data series',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
seriesCollapsed: {
|
||||
type: 'json',
|
||||
description: 'Collapsed aggregate totals per series',
|
||||
},
|
||||
xValues: {
|
||||
type: 'array',
|
||||
description: 'Date values for the x-axis',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
},
|
||||
}
|
||||
105
apps/sim/tools/amplitude/get_active_users.ts
Normal file
105
apps/sim/tools/amplitude/get_active_users.ts
Normal file
@@ -0,0 +1,105 @@
|
||||
import type {
|
||||
AmplitudeGetActiveUsersParams,
|
||||
AmplitudeGetActiveUsersResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const getActiveUsersTool: ToolConfig<
|
||||
AmplitudeGetActiveUsersParams,
|
||||
AmplitudeGetActiveUsersResponse
|
||||
> = {
|
||||
id: 'amplitude_get_active_users',
|
||||
name: 'Amplitude Get Active Users',
|
||||
description: 'Get active or new user counts over a date range from the Dashboard REST API.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
start: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Start date in YYYYMMDD format',
|
||||
},
|
||||
end: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'End date in YYYYMMDD format',
|
||||
},
|
||||
metric: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Metric type: "active" or "new" (default: active)',
|
||||
},
|
||||
interval: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Time interval: 1 (daily), 7 (weekly), or 30 (monthly)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://amplitude.com/api/2/users')
|
||||
url.searchParams.set('start', params.start)
|
||||
url.searchParams.set('end', params.end)
|
||||
if (params.metric) url.searchParams.set('m', params.metric)
|
||||
if (params.interval) url.searchParams.set('i', params.interval)
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude Active Users API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const result = data.data ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
series: result.series ?? [],
|
||||
seriesMeta: result.seriesMeta ?? [],
|
||||
xValues: result.xValues ?? [],
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
series: {
|
||||
type: 'json',
|
||||
description: 'Array of data series with user counts per time interval',
|
||||
},
|
||||
seriesMeta: {
|
||||
type: 'array',
|
||||
description: 'Metadata labels for each data series (e.g., segment names)',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
xValues: {
|
||||
type: 'array',
|
||||
description: 'Date values for the x-axis',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
},
|
||||
}
|
||||
102
apps/sim/tools/amplitude/get_revenue.ts
Normal file
102
apps/sim/tools/amplitude/get_revenue.ts
Normal file
@@ -0,0 +1,102 @@
|
||||
import type {
|
||||
AmplitudeGetRevenueParams,
|
||||
AmplitudeGetRevenueResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const getRevenueTool: ToolConfig<AmplitudeGetRevenueParams, AmplitudeGetRevenueResponse> = {
|
||||
id: 'amplitude_get_revenue',
|
||||
name: 'Amplitude Get Revenue',
|
||||
description: 'Get revenue LTV data including ARPU, ARPPU, total revenue, and paying user counts.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
start: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Start date in YYYYMMDD format',
|
||||
},
|
||||
end: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'End date in YYYYMMDD format',
|
||||
},
|
||||
metric: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Metric: 0 (ARPU), 1 (ARPPU), 2 (Total Revenue), 3 (Paying Users)',
|
||||
},
|
||||
interval: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Time interval: 1 (daily), 7 (weekly), or 30 (monthly)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://amplitude.com/api/2/revenue/ltv')
|
||||
url.searchParams.set('start', params.start)
|
||||
url.searchParams.set('end', params.end)
|
||||
if (params.metric) url.searchParams.set('m', params.metric)
|
||||
if (params.interval) url.searchParams.set('i', params.interval)
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude Revenue API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const result = data.data ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
series: result.series ?? [],
|
||||
seriesLabels: result.seriesLabels ?? [],
|
||||
xValues: result.xValues ?? [],
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
series: {
|
||||
type: 'json',
|
||||
description: 'Array of revenue data series',
|
||||
},
|
||||
seriesLabels: {
|
||||
type: 'array',
|
||||
description: 'Labels for each data series',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
xValues: {
|
||||
type: 'array',
|
||||
description: 'Date values for the x-axis',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
},
|
||||
}
|
||||
99
apps/sim/tools/amplitude/group_identify.ts
Normal file
99
apps/sim/tools/amplitude/group_identify.ts
Normal file
@@ -0,0 +1,99 @@
|
||||
import type {
|
||||
AmplitudeGroupIdentifyParams,
|
||||
AmplitudeGroupIdentifyResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const groupIdentifyTool: ToolConfig<
|
||||
AmplitudeGroupIdentifyParams,
|
||||
AmplitudeGroupIdentifyResponse
|
||||
> = {
|
||||
id: 'amplitude_group_identify',
|
||||
name: 'Amplitude Group Identify',
|
||||
description:
|
||||
'Set group-level properties in Amplitude. Supports $set, $setOnce, $add, $append, $unset operations.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
groupType: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Group classification (e.g., "company", "org_id")',
|
||||
},
|
||||
groupValue: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Specific group identifier (e.g., "Acme Corp")',
|
||||
},
|
||||
groupProperties: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'JSON object of group properties. Use operations like $set, $setOnce, $add, $append, $unset.',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: 'https://api2.amplitude.com/groupidentify',
|
||||
method: 'POST',
|
||||
headers: () => ({
|
||||
'Content-Type': 'application/json',
|
||||
}),
|
||||
body: (params) => {
|
||||
let groupProperties: Record<string, unknown> = {}
|
||||
try {
|
||||
groupProperties = JSON.parse(params.groupProperties)
|
||||
} catch {
|
||||
groupProperties = {}
|
||||
}
|
||||
|
||||
return {
|
||||
api_key: params.apiKey,
|
||||
identification: [
|
||||
{
|
||||
group_type: params.groupType,
|
||||
group_value: params.groupValue,
|
||||
group_properties: groupProperties,
|
||||
},
|
||||
],
|
||||
}
|
||||
},
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const text = await response.text()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Amplitude Group Identify API error: ${text}`)
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
code: response.status,
|
||||
message: text || null,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
code: {
|
||||
type: 'number',
|
||||
description: 'HTTP response status code',
|
||||
},
|
||||
message: {
|
||||
type: 'string',
|
||||
description: 'Response message',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
97
apps/sim/tools/amplitude/identify_user.ts
Normal file
97
apps/sim/tools/amplitude/identify_user.ts
Normal file
@@ -0,0 +1,97 @@
|
||||
import type {
|
||||
AmplitudeIdentifyUserParams,
|
||||
AmplitudeIdentifyUserResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const identifyUserTool: ToolConfig<
|
||||
AmplitudeIdentifyUserParams,
|
||||
AmplitudeIdentifyUserResponse
|
||||
> = {
|
||||
id: 'amplitude_identify_user',
|
||||
name: 'Amplitude Identify User',
|
||||
description:
|
||||
'Set user properties in Amplitude using the Identify API. Supports $set, $setOnce, $add, $append, $unset operations.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
userId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'User ID (required if no device_id)',
|
||||
},
|
||||
deviceId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Device ID (required if no user_id)',
|
||||
},
|
||||
userProperties: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'JSON object of user properties. Use operations like $set, $setOnce, $add, $append, $unset.',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: 'https://api2.amplitude.com/identify',
|
||||
method: 'POST',
|
||||
headers: () => ({
|
||||
'Content-Type': 'application/json',
|
||||
}),
|
||||
body: (params) => {
|
||||
const identification: Record<string, unknown> = {}
|
||||
|
||||
if (params.userId) identification.user_id = params.userId
|
||||
if (params.deviceId) identification.device_id = params.deviceId
|
||||
|
||||
try {
|
||||
identification.user_properties = JSON.parse(params.userProperties)
|
||||
} catch {
|
||||
identification.user_properties = {}
|
||||
}
|
||||
|
||||
return {
|
||||
api_key: params.apiKey,
|
||||
identification: [identification],
|
||||
}
|
||||
},
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const text = await response.text()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(`Amplitude Identify API error: ${text}`)
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
code: response.status,
|
||||
message: text || null,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
code: {
|
||||
type: 'number',
|
||||
description: 'HTTP response status code',
|
||||
},
|
||||
message: {
|
||||
type: 'string',
|
||||
description: 'Response message',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
23
apps/sim/tools/amplitude/index.ts
Normal file
23
apps/sim/tools/amplitude/index.ts
Normal file
@@ -0,0 +1,23 @@
|
||||
import { eventSegmentationTool } from '@/tools/amplitude/event_segmentation'
|
||||
import { getActiveUsersTool } from '@/tools/amplitude/get_active_users'
|
||||
import { getRevenueTool } from '@/tools/amplitude/get_revenue'
|
||||
import { groupIdentifyTool } from '@/tools/amplitude/group_identify'
|
||||
import { identifyUserTool } from '@/tools/amplitude/identify_user'
|
||||
import { listEventsTool } from '@/tools/amplitude/list_events'
|
||||
import { realtimeActiveUsersTool } from '@/tools/amplitude/realtime_active_users'
|
||||
import { sendEventTool } from '@/tools/amplitude/send_event'
|
||||
import { userActivityTool } from '@/tools/amplitude/user_activity'
|
||||
import { userProfileTool } from '@/tools/amplitude/user_profile'
|
||||
import { userSearchTool } from '@/tools/amplitude/user_search'
|
||||
|
||||
export const amplitudeSendEventTool = sendEventTool
|
||||
export const amplitudeIdentifyUserTool = identifyUserTool
|
||||
export const amplitudeGroupIdentifyTool = groupIdentifyTool
|
||||
export const amplitudeUserSearchTool = userSearchTool
|
||||
export const amplitudeUserActivityTool = userActivityTool
|
||||
export const amplitudeUserProfileTool = userProfileTool
|
||||
export const amplitudeEventSegmentationTool = eventSegmentationTool
|
||||
export const amplitudeGetActiveUsersTool = getActiveUsersTool
|
||||
export const amplitudeRealtimeActiveUsersTool = realtimeActiveUsersTool
|
||||
export const amplitudeListEventsTool = listEventsTool
|
||||
export const amplitudeGetRevenueTool = getRevenueTool
|
||||
79
apps/sim/tools/amplitude/list_events.ts
Normal file
79
apps/sim/tools/amplitude/list_events.ts
Normal file
@@ -0,0 +1,79 @@
|
||||
import type {
|
||||
AmplitudeListEventsParams,
|
||||
AmplitudeListEventsResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const listEventsTool: ToolConfig<AmplitudeListEventsParams, AmplitudeListEventsResponse> = {
|
||||
id: 'amplitude_list_events',
|
||||
name: 'Amplitude List Events',
|
||||
description:
|
||||
'List all event types in the Amplitude project with their weekly totals and unique counts.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: 'https://amplitude.com/api/2/events/list',
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude List Events API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const events = (data.data ?? []).map(
|
||||
(e: Record<string, unknown>) =>
|
||||
({
|
||||
value: (e.value as string) ?? '',
|
||||
displayName: (e.display as string) ?? null,
|
||||
totals: (e.totals as number) ?? 0,
|
||||
hidden: (e.hidden as boolean) ?? false,
|
||||
deleted: (e.deleted as boolean) ?? false,
|
||||
}) as const
|
||||
)
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
events,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
events: {
|
||||
type: 'array',
|
||||
description: 'List of event types in the project',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
value: { type: 'string', description: 'Event type name' },
|
||||
displayName: { type: 'string', description: 'Event display name' },
|
||||
totals: { type: 'number', description: 'Weekly total count' },
|
||||
hidden: { type: 'boolean', description: 'Whether the event is hidden' },
|
||||
deleted: { type: 'boolean', description: 'Whether the event is deleted' },
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
74
apps/sim/tools/amplitude/realtime_active_users.ts
Normal file
74
apps/sim/tools/amplitude/realtime_active_users.ts
Normal file
@@ -0,0 +1,74 @@
|
||||
import type {
|
||||
AmplitudeRealtimeActiveUsersParams,
|
||||
AmplitudeRealtimeActiveUsersResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const realtimeActiveUsersTool: ToolConfig<
|
||||
AmplitudeRealtimeActiveUsersParams,
|
||||
AmplitudeRealtimeActiveUsersResponse
|
||||
> = {
|
||||
id: 'amplitude_realtime_active_users',
|
||||
name: 'Amplitude Real-time Active Users',
|
||||
description: 'Get real-time active user counts at 5-minute granularity for the last 2 days.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: 'https://amplitude.com/api/2/realtime',
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude Real-time API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const result = data.data ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
series: result.series ?? [],
|
||||
seriesLabels: result.seriesLabels ?? [],
|
||||
xValues: result.xValues ?? [],
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
series: {
|
||||
type: 'json',
|
||||
description: 'Array of data series with active user counts at 5-minute intervals',
|
||||
},
|
||||
seriesLabels: {
|
||||
type: 'array',
|
||||
description: 'Labels for each series (e.g., "Today", "Yesterday")',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
xValues: {
|
||||
type: 'array',
|
||||
description: 'Time values for the x-axis (e.g., "15:00", "15:05")',
|
||||
items: { type: 'string' },
|
||||
},
|
||||
},
|
||||
}
|
||||
214
apps/sim/tools/amplitude/send_event.ts
Normal file
214
apps/sim/tools/amplitude/send_event.ts
Normal file
@@ -0,0 +1,214 @@
|
||||
import type { AmplitudeSendEventParams, AmplitudeSendEventResponse } from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const sendEventTool: ToolConfig<AmplitudeSendEventParams, AmplitudeSendEventResponse> = {
|
||||
id: 'amplitude_send_event',
|
||||
name: 'Amplitude Send Event',
|
||||
description: 'Track an event in Amplitude using the HTTP V2 API.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
userId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'User ID (required if no device_id)',
|
||||
},
|
||||
deviceId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Device ID (required if no user_id)',
|
||||
},
|
||||
eventType: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Name of the event (e.g., "page_view", "purchase")',
|
||||
},
|
||||
eventProperties: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'JSON object of custom event properties',
|
||||
},
|
||||
userProperties: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'JSON object of user properties to set (supports $set, $setOnce, $add, $append, $unset)',
|
||||
},
|
||||
time: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Event timestamp in milliseconds since epoch',
|
||||
},
|
||||
sessionId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Session start time in milliseconds since epoch',
|
||||
},
|
||||
insertId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Unique ID for deduplication (within 7-day window)',
|
||||
},
|
||||
appVersion: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Application version string',
|
||||
},
|
||||
platform: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Platform (e.g., "Web", "iOS", "Android")',
|
||||
},
|
||||
country: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Two-letter country code',
|
||||
},
|
||||
language: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Language code (e.g., "en")',
|
||||
},
|
||||
ip: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'IP address for geo-location',
|
||||
},
|
||||
price: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Price of the item purchased',
|
||||
},
|
||||
quantity: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Quantity of items purchased',
|
||||
},
|
||||
revenue: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Revenue amount',
|
||||
},
|
||||
productId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Product identifier',
|
||||
},
|
||||
revenueType: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Revenue type (e.g., "purchase", "refund")',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: 'https://api2.amplitude.com/2/httpapi',
|
||||
method: 'POST',
|
||||
headers: () => ({
|
||||
'Content-Type': 'application/json',
|
||||
}),
|
||||
body: (params) => {
|
||||
const event: Record<string, unknown> = {
|
||||
event_type: params.eventType,
|
||||
}
|
||||
|
||||
if (params.userId) event.user_id = params.userId
|
||||
if (params.deviceId) event.device_id = params.deviceId
|
||||
if (params.time) event.time = Number(params.time)
|
||||
if (params.sessionId) event.session_id = Number(params.sessionId)
|
||||
if (params.insertId) event.insert_id = params.insertId
|
||||
if (params.appVersion) event.app_version = params.appVersion
|
||||
if (params.platform) event.platform = params.platform
|
||||
if (params.country) event.country = params.country
|
||||
if (params.language) event.language = params.language
|
||||
if (params.ip) event.ip = params.ip
|
||||
if (params.price) event.price = Number(params.price)
|
||||
if (params.quantity) event.quantity = Number(params.quantity)
|
||||
if (params.revenue) event.revenue = Number(params.revenue)
|
||||
if (params.productId) event.product_id = params.productId
|
||||
if (params.revenueType) event.revenue_type = params.revenueType
|
||||
|
||||
if (params.eventProperties) {
|
||||
try {
|
||||
event.event_properties = JSON.parse(params.eventProperties)
|
||||
} catch {
|
||||
event.event_properties = {}
|
||||
}
|
||||
}
|
||||
|
||||
if (params.userProperties) {
|
||||
try {
|
||||
event.user_properties = JSON.parse(params.userProperties)
|
||||
} catch {
|
||||
event.user_properties = {}
|
||||
}
|
||||
}
|
||||
|
||||
return {
|
||||
api_key: params.apiKey,
|
||||
events: [event],
|
||||
}
|
||||
},
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (data.code !== 200) {
|
||||
throw new Error(data.error || `Amplitude API error: code ${data.code}`)
|
||||
}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
code: data.code ?? 200,
|
||||
eventsIngested: data.events_ingested ?? 0,
|
||||
payloadSizeBytes: data.payload_size_bytes ?? 0,
|
||||
serverUploadTime: data.server_upload_time ?? 0,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
code: {
|
||||
type: 'number',
|
||||
description: 'Response code (200 for success)',
|
||||
},
|
||||
eventsIngested: {
|
||||
type: 'number',
|
||||
description: 'Number of events ingested',
|
||||
},
|
||||
payloadSizeBytes: {
|
||||
type: 'number',
|
||||
description: 'Size of the payload in bytes',
|
||||
},
|
||||
serverUploadTime: {
|
||||
type: 'number',
|
||||
description: 'Server upload timestamp',
|
||||
},
|
||||
},
|
||||
}
|
||||
241
apps/sim/tools/amplitude/types.ts
Normal file
241
apps/sim/tools/amplitude/types.ts
Normal file
@@ -0,0 +1,241 @@
|
||||
import type { ToolResponse } from '@/tools/types'
|
||||
|
||||
/**
|
||||
* Base params shared by endpoints using API key in body.
|
||||
*/
|
||||
export interface AmplitudeApiKeyParams {
|
||||
apiKey: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Base params shared by endpoints using Basic Auth (api_key:secret_key).
|
||||
*/
|
||||
export interface AmplitudeBasicAuthParams {
|
||||
apiKey: string
|
||||
secretKey: string
|
||||
}
|
||||
|
||||
/**
|
||||
* Send Event params (HTTP V2 API).
|
||||
*/
|
||||
export interface AmplitudeSendEventParams extends AmplitudeApiKeyParams {
|
||||
userId?: string
|
||||
deviceId?: string
|
||||
eventType: string
|
||||
eventProperties?: string
|
||||
userProperties?: string
|
||||
time?: string
|
||||
sessionId?: string
|
||||
insertId?: string
|
||||
appVersion?: string
|
||||
platform?: string
|
||||
country?: string
|
||||
language?: string
|
||||
ip?: string
|
||||
price?: string
|
||||
quantity?: string
|
||||
revenue?: string
|
||||
productId?: string
|
||||
revenueType?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeSendEventResponse extends ToolResponse {
|
||||
output: {
|
||||
code: number
|
||||
eventsIngested: number
|
||||
payloadSizeBytes: number
|
||||
serverUploadTime: number
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Identify User params (Identify API).
|
||||
*/
|
||||
export interface AmplitudeIdentifyUserParams extends AmplitudeApiKeyParams {
|
||||
userId?: string
|
||||
deviceId?: string
|
||||
userProperties: string
|
||||
}
|
||||
|
||||
export interface AmplitudeIdentifyUserResponse extends ToolResponse {
|
||||
output: {
|
||||
code: number
|
||||
message: string | null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Group Identify params (Group Identify API).
|
||||
*/
|
||||
export interface AmplitudeGroupIdentifyParams extends AmplitudeApiKeyParams {
|
||||
groupType: string
|
||||
groupValue: string
|
||||
groupProperties: string
|
||||
}
|
||||
|
||||
export interface AmplitudeGroupIdentifyResponse extends ToolResponse {
|
||||
output: {
|
||||
code: number
|
||||
message: string | null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* User Search params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeUserSearchParams extends AmplitudeBasicAuthParams {
|
||||
user: string
|
||||
}
|
||||
|
||||
export interface AmplitudeUserSearchResponse extends ToolResponse {
|
||||
output: {
|
||||
matches: Array<{
|
||||
amplitudeId: number
|
||||
userId: string | null
|
||||
}>
|
||||
type: string | null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* User Activity params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeUserActivityParams extends AmplitudeBasicAuthParams {
|
||||
amplitudeId: string
|
||||
offset?: string
|
||||
limit?: string
|
||||
direction?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeUserActivityResponse extends ToolResponse {
|
||||
output: {
|
||||
events: Array<{
|
||||
eventType: string
|
||||
eventTime: string
|
||||
eventProperties: Record<string, unknown>
|
||||
userProperties: Record<string, unknown>
|
||||
sessionId: number | null
|
||||
platform: string | null
|
||||
country: string | null
|
||||
city: string | null
|
||||
}>
|
||||
userData: {
|
||||
userId: string | null
|
||||
canonicalAmplitudeId: number | null
|
||||
numEvents: number | null
|
||||
numSessions: number | null
|
||||
platform: string | null
|
||||
country: string | null
|
||||
} | null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* User Profile params (User Profile API).
|
||||
*/
|
||||
export interface AmplitudeUserProfileParams {
|
||||
secretKey: string
|
||||
userId?: string
|
||||
deviceId?: string
|
||||
getAmpProps?: string
|
||||
getCohortIds?: string
|
||||
getComputations?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeUserProfileResponse extends ToolResponse {
|
||||
output: {
|
||||
userId: string | null
|
||||
deviceId: string | null
|
||||
ampProps: Record<string, unknown> | null
|
||||
cohortIds: string[] | null
|
||||
computations: Record<string, unknown> | null
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Event Segmentation params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeEventSegmentationParams extends AmplitudeBasicAuthParams {
|
||||
eventType: string
|
||||
start: string
|
||||
end: string
|
||||
metric?: string
|
||||
interval?: string
|
||||
groupBy?: string
|
||||
limit?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeEventSegmentationResponse extends ToolResponse {
|
||||
output: {
|
||||
series: unknown[]
|
||||
seriesLabels: string[]
|
||||
seriesCollapsed: unknown[]
|
||||
xValues: string[]
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Active Users params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeGetActiveUsersParams extends AmplitudeBasicAuthParams {
|
||||
start: string
|
||||
end: string
|
||||
metric?: string
|
||||
interval?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeGetActiveUsersResponse extends ToolResponse {
|
||||
output: {
|
||||
series: number[][]
|
||||
seriesMeta: string[]
|
||||
xValues: string[]
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Real-time Active Users params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeRealtimeActiveUsersParams extends AmplitudeBasicAuthParams {}
|
||||
|
||||
export interface AmplitudeRealtimeActiveUsersResponse extends ToolResponse {
|
||||
output: {
|
||||
series: number[][]
|
||||
seriesLabels: string[]
|
||||
xValues: string[]
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* List Events params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeListEventsParams extends AmplitudeBasicAuthParams {}
|
||||
|
||||
export interface AmplitudeListEventsResponse extends ToolResponse {
|
||||
output: {
|
||||
events: Array<{
|
||||
value: string
|
||||
displayName: string | null
|
||||
totals: number
|
||||
hidden: boolean
|
||||
deleted: boolean
|
||||
}>
|
||||
}
|
||||
}
|
||||
|
||||
/**
|
||||
* Get Revenue params (Dashboard REST API).
|
||||
*/
|
||||
export interface AmplitudeGetRevenueParams extends AmplitudeBasicAuthParams {
|
||||
start: string
|
||||
end: string
|
||||
metric?: string
|
||||
interval?: string
|
||||
}
|
||||
|
||||
export interface AmplitudeGetRevenueResponse extends ToolResponse {
|
||||
output: {
|
||||
series: unknown[]
|
||||
seriesLabels: string[]
|
||||
xValues: string[]
|
||||
}
|
||||
}
|
||||
144
apps/sim/tools/amplitude/user_activity.ts
Normal file
144
apps/sim/tools/amplitude/user_activity.ts
Normal file
@@ -0,0 +1,144 @@
|
||||
import type {
|
||||
AmplitudeUserActivityParams,
|
||||
AmplitudeUserActivityResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const userActivityTool: ToolConfig<
|
||||
AmplitudeUserActivityParams,
|
||||
AmplitudeUserActivityResponse
|
||||
> = {
|
||||
id: 'amplitude_user_activity',
|
||||
name: 'Amplitude User Activity',
|
||||
description: 'Get the event stream for a specific user by their Amplitude ID.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
amplitudeId: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Amplitude internal user ID',
|
||||
},
|
||||
offset: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Offset for pagination (default 0)',
|
||||
},
|
||||
limit: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Maximum number of events to return (default 1000, max 1000)',
|
||||
},
|
||||
direction: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Sort direction: "latest" or "earliest" (default: latest)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://amplitude.com/api/2/useractivity')
|
||||
url.searchParams.set('user', params.amplitudeId.trim())
|
||||
if (params.offset) url.searchParams.set('offset', params.offset)
|
||||
if (params.limit) url.searchParams.set('limit', params.limit)
|
||||
if (params.direction) url.searchParams.set('direction', params.direction)
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude User Activity API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const events = (data.events ?? []).map(
|
||||
(e: Record<string, unknown>) =>
|
||||
({
|
||||
eventType: (e.event_type as string) ?? '',
|
||||
eventTime: (e.event_time as string) ?? '',
|
||||
eventProperties: (e.event_properties as Record<string, unknown>) ?? {},
|
||||
userProperties: (e.user_properties as Record<string, unknown>) ?? {},
|
||||
sessionId: (e.session_id as number) ?? null,
|
||||
platform: (e.platform as string) ?? null,
|
||||
country: (e.country as string) ?? null,
|
||||
city: (e.city as string) ?? null,
|
||||
}) as const
|
||||
)
|
||||
|
||||
const ud = data.userData as Record<string, unknown> | undefined
|
||||
const userData = ud
|
||||
? {
|
||||
userId: (ud.user_id as string) ?? null,
|
||||
canonicalAmplitudeId: (ud.canonical_amplitude_id as number) ?? null,
|
||||
numEvents: (ud.num_events as number) ?? null,
|
||||
numSessions: (ud.num_sessions as number) ?? null,
|
||||
platform: (ud.platform as string) ?? null,
|
||||
country: (ud.country as string) ?? null,
|
||||
}
|
||||
: null
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
events,
|
||||
userData,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
events: {
|
||||
type: 'array',
|
||||
description: 'List of user events',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
eventType: { type: 'string', description: 'Type of event' },
|
||||
eventTime: { type: 'string', description: 'Event timestamp' },
|
||||
eventProperties: { type: 'json', description: 'Custom event properties' },
|
||||
userProperties: { type: 'json', description: 'User properties at event time' },
|
||||
sessionId: { type: 'number', description: 'Session ID' },
|
||||
platform: { type: 'string', description: 'Platform' },
|
||||
country: { type: 'string', description: 'Country' },
|
||||
city: { type: 'string', description: 'City' },
|
||||
},
|
||||
},
|
||||
},
|
||||
userData: {
|
||||
type: 'json',
|
||||
description: 'User metadata',
|
||||
optional: true,
|
||||
properties: {
|
||||
userId: { type: 'string', description: 'External user ID' },
|
||||
canonicalAmplitudeId: { type: 'number', description: 'Canonical Amplitude ID' },
|
||||
numEvents: { type: 'number', description: 'Total event count' },
|
||||
numSessions: { type: 'number', description: 'Total session count' },
|
||||
platform: { type: 'string', description: 'Primary platform' },
|
||||
country: { type: 'string', description: 'Country' },
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
120
apps/sim/tools/amplitude/user_profile.ts
Normal file
120
apps/sim/tools/amplitude/user_profile.ts
Normal file
@@ -0,0 +1,120 @@
|
||||
import type {
|
||||
AmplitudeUserProfileParams,
|
||||
AmplitudeUserProfileResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const userProfileTool: ToolConfig<AmplitudeUserProfileParams, AmplitudeUserProfileResponse> =
|
||||
{
|
||||
id: 'amplitude_user_profile',
|
||||
name: 'Amplitude User Profile',
|
||||
description:
|
||||
'Get a user profile including properties, cohort memberships, and computed properties.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
userId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'External user ID (required if no device_id)',
|
||||
},
|
||||
deviceId: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Device ID (required if no user_id)',
|
||||
},
|
||||
getAmpProps: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Include Amplitude user properties (true/false, default: false)',
|
||||
},
|
||||
getCohortIds: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Include cohort IDs the user belongs to (true/false, default: false)',
|
||||
},
|
||||
getComputations: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Include computed user properties (true/false, default: false)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://profile-api.amplitude.com/v1/userprofile')
|
||||
if (params.userId) url.searchParams.set('user_id', params.userId.trim())
|
||||
if (params.deviceId) url.searchParams.set('device_id', params.deviceId.trim())
|
||||
if (params.getAmpProps) url.searchParams.set('get_amp_props', params.getAmpProps)
|
||||
if (params.getCohortIds) url.searchParams.set('get_cohort_ids', params.getCohortIds)
|
||||
if (params.getComputations) url.searchParams.set('get_computations', params.getComputations)
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Api-Key ${params.secretKey}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude User Profile API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const userData = data.userData ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
userId: (userData.user_id as string) ?? null,
|
||||
deviceId: (userData.device_id as string) ?? null,
|
||||
ampProps: (userData.amp_props as Record<string, unknown>) ?? null,
|
||||
cohortIds: (userData.cohort_ids as string[]) ?? null,
|
||||
computations: (userData.computations as Record<string, unknown>) ?? null,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
userId: {
|
||||
type: 'string',
|
||||
description: 'External user ID',
|
||||
optional: true,
|
||||
},
|
||||
deviceId: {
|
||||
type: 'string',
|
||||
description: 'Device ID',
|
||||
optional: true,
|
||||
},
|
||||
ampProps: {
|
||||
type: 'json',
|
||||
description:
|
||||
'Amplitude user properties (library, first_used, last_used, custom properties)',
|
||||
optional: true,
|
||||
},
|
||||
cohortIds: {
|
||||
type: 'array',
|
||||
description: 'List of cohort IDs the user belongs to',
|
||||
optional: true,
|
||||
items: { type: 'string' },
|
||||
},
|
||||
computations: {
|
||||
type: 'json',
|
||||
description: 'Computed user properties',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
89
apps/sim/tools/amplitude/user_search.ts
Normal file
89
apps/sim/tools/amplitude/user_search.ts
Normal file
@@ -0,0 +1,89 @@
|
||||
import type {
|
||||
AmplitudeUserSearchParams,
|
||||
AmplitudeUserSearchResponse,
|
||||
} from '@/tools/amplitude/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const userSearchTool: ToolConfig<AmplitudeUserSearchParams, AmplitudeUserSearchResponse> = {
|
||||
id: 'amplitude_user_search',
|
||||
name: 'Amplitude User Search',
|
||||
description:
|
||||
'Search for a user by User ID, Device ID, or Amplitude ID using the Dashboard REST API.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude API Key',
|
||||
},
|
||||
secretKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Amplitude Secret Key',
|
||||
},
|
||||
user: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'User ID, Device ID, or Amplitude ID to search for',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://amplitude.com/api/2/usersearch')
|
||||
url.searchParams.set('user', params.user.trim())
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: (params) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:${params.secretKey}`)}`,
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error || `Amplitude User Search API error: ${response.status}`)
|
||||
}
|
||||
|
||||
const matches = (data.matches ?? []).map(
|
||||
(m: Record<string, unknown>) =>
|
||||
({
|
||||
amplitudeId: (m.amplitude_id as number) ?? 0,
|
||||
userId: (m.user_id as string) ?? null,
|
||||
}) as const
|
||||
)
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
matches,
|
||||
type: (data.type as string) ?? null,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
matches: {
|
||||
type: 'array',
|
||||
description: 'List of matching users',
|
||||
items: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
amplitudeId: { type: 'number', description: 'Amplitude internal user ID' },
|
||||
userId: { type: 'string', description: 'External user ID' },
|
||||
},
|
||||
},
|
||||
},
|
||||
type: {
|
||||
type: 'string',
|
||||
description: 'Match type (e.g., match_user_or_device_id)',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
223
apps/sim/tools/google_pagespeed/analyze.ts
Normal file
223
apps/sim/tools/google_pagespeed/analyze.ts
Normal file
@@ -0,0 +1,223 @@
|
||||
import type {
|
||||
GooglePagespeedAnalyzeParams,
|
||||
GooglePagespeedAnalyzeResponse,
|
||||
} from '@/tools/google_pagespeed/types'
|
||||
import type { ToolConfig } from '@/tools/types'
|
||||
|
||||
export const analyzeTool: ToolConfig<GooglePagespeedAnalyzeParams, GooglePagespeedAnalyzeResponse> =
|
||||
{
|
||||
id: 'google_pagespeed_analyze',
|
||||
name: 'Google PageSpeed Analyze',
|
||||
description:
|
||||
'Analyze a webpage for performance, accessibility, SEO, and best practices using Google PageSpeed Insights.',
|
||||
version: '1.0.0',
|
||||
|
||||
params: {
|
||||
apiKey: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-only',
|
||||
description: 'Google PageSpeed Insights API Key',
|
||||
},
|
||||
url: {
|
||||
type: 'string',
|
||||
required: true,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'The URL of the webpage to analyze',
|
||||
},
|
||||
category: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description:
|
||||
'Lighthouse categories to analyze (comma-separated): performance, accessibility, best-practices, seo',
|
||||
},
|
||||
strategy: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Analysis strategy: desktop or mobile',
|
||||
},
|
||||
locale: {
|
||||
type: 'string',
|
||||
required: false,
|
||||
visibility: 'user-or-llm',
|
||||
description: 'Locale for results (e.g., en, fr, de)',
|
||||
},
|
||||
},
|
||||
|
||||
request: {
|
||||
url: (params) => {
|
||||
const url = new URL('https://www.googleapis.com/pagespeedonline/v5/runPagespeed')
|
||||
url.searchParams.append('url', params.url.trim())
|
||||
url.searchParams.append('key', params.apiKey)
|
||||
|
||||
if (params.category) {
|
||||
const categories = params.category.split(',').map((c) => c.trim())
|
||||
for (const cat of categories) {
|
||||
url.searchParams.append('category', cat)
|
||||
}
|
||||
} else {
|
||||
url.searchParams.append('category', 'performance')
|
||||
url.searchParams.append('category', 'accessibility')
|
||||
url.searchParams.append('category', 'best-practices')
|
||||
url.searchParams.append('category', 'seo')
|
||||
}
|
||||
|
||||
if (params.strategy) {
|
||||
url.searchParams.append('strategy', params.strategy)
|
||||
}
|
||||
if (params.locale) {
|
||||
url.searchParams.append('locale', params.locale)
|
||||
}
|
||||
|
||||
return url.toString()
|
||||
},
|
||||
method: 'GET',
|
||||
headers: () => ({
|
||||
Accept: 'application/json',
|
||||
}),
|
||||
},
|
||||
|
||||
transformResponse: async (response: Response) => {
|
||||
const data = await response.json()
|
||||
|
||||
if (!response.ok) {
|
||||
throw new Error(data.error?.message ?? 'Failed to analyze page')
|
||||
}
|
||||
|
||||
const lighthouse = data.lighthouseResult ?? {}
|
||||
const categories = lighthouse.categories ?? {}
|
||||
const audits = lighthouse.audits ?? {}
|
||||
const loadingExperience = data.loadingExperience ?? {}
|
||||
|
||||
return {
|
||||
success: true,
|
||||
output: {
|
||||
finalUrl: data.id ?? null,
|
||||
performanceScore: categories.performance?.score ?? null,
|
||||
accessibilityScore: categories.accessibility?.score ?? null,
|
||||
bestPracticesScore: categories['best-practices']?.score ?? null,
|
||||
seoScore: categories.seo?.score ?? null,
|
||||
firstContentfulPaint: audits['first-contentful-paint']?.displayValue ?? null,
|
||||
firstContentfulPaintMs: audits['first-contentful-paint']?.numericValue ?? null,
|
||||
largestContentfulPaint: audits['largest-contentful-paint']?.displayValue ?? null,
|
||||
largestContentfulPaintMs: audits['largest-contentful-paint']?.numericValue ?? null,
|
||||
totalBlockingTime: audits['total-blocking-time']?.displayValue ?? null,
|
||||
totalBlockingTimeMs: audits['total-blocking-time']?.numericValue ?? null,
|
||||
cumulativeLayoutShift: audits['cumulative-layout-shift']?.displayValue ?? null,
|
||||
cumulativeLayoutShiftValue: audits['cumulative-layout-shift']?.numericValue ?? null,
|
||||
speedIndex: audits['speed-index']?.displayValue ?? null,
|
||||
speedIndexMs: audits['speed-index']?.numericValue ?? null,
|
||||
interactive: audits.interactive?.displayValue ?? null,
|
||||
interactiveMs: audits.interactive?.numericValue ?? null,
|
||||
overallCategory: loadingExperience.overall_category ?? null,
|
||||
analysisTimestamp: data.analysisUTCTimestamp ?? null,
|
||||
lighthouseVersion: lighthouse.lighthouseVersion ?? null,
|
||||
},
|
||||
}
|
||||
},
|
||||
|
||||
outputs: {
|
||||
finalUrl: {
|
||||
type: 'string',
|
||||
description: 'The final URL after redirects',
|
||||
optional: true,
|
||||
},
|
||||
performanceScore: {
|
||||
type: 'number',
|
||||
description: 'Performance category score (0-1)',
|
||||
optional: true,
|
||||
},
|
||||
accessibilityScore: {
|
||||
type: 'number',
|
||||
description: 'Accessibility category score (0-1)',
|
||||
optional: true,
|
||||
},
|
||||
bestPracticesScore: {
|
||||
type: 'number',
|
||||
description: 'Best Practices category score (0-1)',
|
||||
optional: true,
|
||||
},
|
||||
seoScore: {
|
||||
type: 'number',
|
||||
description: 'SEO category score (0-1)',
|
||||
optional: true,
|
||||
},
|
||||
firstContentfulPaint: {
|
||||
type: 'string',
|
||||
description: 'Time to First Contentful Paint (display value)',
|
||||
optional: true,
|
||||
},
|
||||
firstContentfulPaintMs: {
|
||||
type: 'number',
|
||||
description: 'Time to First Contentful Paint in milliseconds',
|
||||
optional: true,
|
||||
},
|
||||
largestContentfulPaint: {
|
||||
type: 'string',
|
||||
description: 'Time to Largest Contentful Paint (display value)',
|
||||
optional: true,
|
||||
},
|
||||
largestContentfulPaintMs: {
|
||||
type: 'number',
|
||||
description: 'Time to Largest Contentful Paint in milliseconds',
|
||||
optional: true,
|
||||
},
|
||||
totalBlockingTime: {
|
||||
type: 'string',
|
||||
description: 'Total Blocking Time (display value)',
|
||||
optional: true,
|
||||
},
|
||||
totalBlockingTimeMs: {
|
||||
type: 'number',
|
||||
description: 'Total Blocking Time in milliseconds',
|
||||
optional: true,
|
||||
},
|
||||
cumulativeLayoutShift: {
|
||||
type: 'string',
|
||||
description: 'Cumulative Layout Shift (display value)',
|
||||
optional: true,
|
||||
},
|
||||
cumulativeLayoutShiftValue: {
|
||||
type: 'number',
|
||||
description: 'Cumulative Layout Shift numeric value',
|
||||
optional: true,
|
||||
},
|
||||
speedIndex: {
|
||||
type: 'string',
|
||||
description: 'Speed Index (display value)',
|
||||
optional: true,
|
||||
},
|
||||
speedIndexMs: {
|
||||
type: 'number',
|
||||
description: 'Speed Index in milliseconds',
|
||||
optional: true,
|
||||
},
|
||||
interactive: {
|
||||
type: 'string',
|
||||
description: 'Time to Interactive (display value)',
|
||||
optional: true,
|
||||
},
|
||||
interactiveMs: {
|
||||
type: 'number',
|
||||
description: 'Time to Interactive in milliseconds',
|
||||
optional: true,
|
||||
},
|
||||
overallCategory: {
|
||||
type: 'string',
|
||||
description: 'Overall loading experience category (FAST, AVERAGE, SLOW, or NONE)',
|
||||
optional: true,
|
||||
},
|
||||
analysisTimestamp: {
|
||||
type: 'string',
|
||||
description: 'UTC timestamp of the analysis',
|
||||
optional: true,
|
||||
},
|
||||
lighthouseVersion: {
|
||||
type: 'string',
|
||||
description: 'Version of Lighthouse used for the analysis',
|
||||
optional: true,
|
||||
},
|
||||
},
|
||||
}
|
||||
5
apps/sim/tools/google_pagespeed/index.ts
Normal file
5
apps/sim/tools/google_pagespeed/index.ts
Normal file
@@ -0,0 +1,5 @@
|
||||
import { analyzeTool } from '@/tools/google_pagespeed/analyze'
|
||||
|
||||
export const googlePagespeedAnalyzeTool = analyzeTool
|
||||
|
||||
export * from '@/tools/google_pagespeed/types'
|
||||
37
apps/sim/tools/google_pagespeed/types.ts
Normal file
37
apps/sim/tools/google_pagespeed/types.ts
Normal file
@@ -0,0 +1,37 @@
|
||||
import type { ToolResponse } from '@/tools/types'
|
||||
|
||||
export interface GooglePagespeedBaseParams {
|
||||
apiKey: string
|
||||
}
|
||||
|
||||
export interface GooglePagespeedAnalyzeParams extends GooglePagespeedBaseParams {
|
||||
url: string
|
||||
category?: string
|
||||
strategy?: string
|
||||
locale?: string
|
||||
}
|
||||
|
||||
export interface GooglePagespeedAnalyzeResponse extends ToolResponse {
|
||||
output: {
|
||||
finalUrl: string | null
|
||||
performanceScore: number | null
|
||||
accessibilityScore: number | null
|
||||
bestPracticesScore: number | null
|
||||
seoScore: number | null
|
||||
firstContentfulPaint: string | null
|
||||
firstContentfulPaintMs: number | null
|
||||
largestContentfulPaint: string | null
|
||||
largestContentfulPaintMs: number | null
|
||||
totalBlockingTime: string | null
|
||||
totalBlockingTimeMs: number | null
|
||||
cumulativeLayoutShift: string | null
|
||||
cumulativeLayoutShiftValue: number | null
|
||||
speedIndex: string | null
|
||||
speedIndexMs: number | null
|
||||
interactive: string | null
|
||||
interactiveMs: number | null
|
||||
overallCategory: string | null
|
||||
analysisTimestamp: string | null
|
||||
lighthouseVersion: string | null
|
||||
}
|
||||
}
|
||||
@@ -31,7 +31,7 @@ export const greenhouseGetApplicationTool: ToolConfig<
|
||||
|
||||
request: {
|
||||
url: (params: GreenhouseGetApplicationParams) =>
|
||||
`https://harvest.greenhouse.io/v1/applications/${params.applicationId}`,
|
||||
`https://harvest.greenhouse.io/v1/applications/${params.applicationId.trim()}`,
|
||||
method: 'GET',
|
||||
headers: (params: GreenhouseGetApplicationParams) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:`)}`,
|
||||
@@ -31,7 +31,7 @@ export const greenhouseGetCandidateTool: ToolConfig<
|
||||
|
||||
request: {
|
||||
url: (params: GreenhouseGetCandidateParams) =>
|
||||
`https://harvest.greenhouse.io/v1/candidates/${params.candidateId}`,
|
||||
`https://harvest.greenhouse.io/v1/candidates/${params.candidateId.trim()}`,
|
||||
method: 'GET',
|
||||
headers: (params: GreenhouseGetCandidateParams) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:`)}`,
|
||||
@@ -25,7 +25,7 @@ export const greenhouseGetJobTool: ToolConfig<GreenhouseGetJobParams, Greenhouse
|
||||
|
||||
request: {
|
||||
url: (params: GreenhouseGetJobParams) =>
|
||||
`https://harvest.greenhouse.io/v1/jobs/${params.jobId}`,
|
||||
`https://harvest.greenhouse.io/v1/jobs/${params.jobId.trim()}`,
|
||||
method: 'GET',
|
||||
headers: (params: GreenhouseGetJobParams) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:`)}`,
|
||||
@@ -25,7 +25,7 @@ export const greenhouseGetUserTool: ToolConfig<GreenhouseGetUserParams, Greenhou
|
||||
|
||||
request: {
|
||||
url: (params: GreenhouseGetUserParams) =>
|
||||
`https://harvest.greenhouse.io/v1/users/${params.userId}`,
|
||||
`https://harvest.greenhouse.io/v1/users/${params.userId.trim()}`,
|
||||
method: 'GET',
|
||||
headers: (params: GreenhouseGetUserParams) => ({
|
||||
Authorization: `Basic ${btoa(`${params.apiKey}:`)}`,
|
||||
@@ -1,14 +1,14 @@
|
||||
import { greenhouseGetApplicationTool } from '@/tools/greenhouse/get-application'
|
||||
import { greenhouseGetCandidateTool } from '@/tools/greenhouse/get-candidate'
|
||||
import { greenhouseGetJobTool } from '@/tools/greenhouse/get-job'
|
||||
import { greenhouseGetUserTool } from '@/tools/greenhouse/get-user'
|
||||
import { greenhouseListApplicationsTool } from '@/tools/greenhouse/list-applications'
|
||||
import { greenhouseListCandidatesTool } from '@/tools/greenhouse/list-candidates'
|
||||
import { greenhouseListDepartmentsTool } from '@/tools/greenhouse/list-departments'
|
||||
import { greenhouseListJobStagesTool } from '@/tools/greenhouse/list-job-stages'
|
||||
import { greenhouseListJobsTool } from '@/tools/greenhouse/list-jobs'
|
||||
import { greenhouseListOfficesTool } from '@/tools/greenhouse/list-offices'
|
||||
import { greenhouseListUsersTool } from '@/tools/greenhouse/list-users'
|
||||
import { greenhouseGetApplicationTool } from '@/tools/greenhouse/get_application'
|
||||
import { greenhouseGetCandidateTool } from '@/tools/greenhouse/get_candidate'
|
||||
import { greenhouseGetJobTool } from '@/tools/greenhouse/get_job'
|
||||
import { greenhouseGetUserTool } from '@/tools/greenhouse/get_user'
|
||||
import { greenhouseListApplicationsTool } from '@/tools/greenhouse/list_applications'
|
||||
import { greenhouseListCandidatesTool } from '@/tools/greenhouse/list_candidates'
|
||||
import { greenhouseListDepartmentsTool } from '@/tools/greenhouse/list_departments'
|
||||
import { greenhouseListJobStagesTool } from '@/tools/greenhouse/list_job_stages'
|
||||
import { greenhouseListJobsTool } from '@/tools/greenhouse/list_jobs'
|
||||
import { greenhouseListOfficesTool } from '@/tools/greenhouse/list_offices'
|
||||
import { greenhouseListUsersTool } from '@/tools/greenhouse/list_users'
|
||||
|
||||
export {
|
||||
greenhouseGetApplicationTool,
|
||||
|
||||
@@ -87,7 +87,7 @@ export const greenhouseListCandidatesTool: ToolConfig<
|
||||
if (params.updated_after) url.searchParams.append('updated_after', params.updated_after)
|
||||
if (params.updated_before) url.searchParams.append('updated_before', params.updated_before)
|
||||
if (params.job_id) url.searchParams.append('job_id', params.job_id)
|
||||
if (params.email) url.searchParams.append('email', params.email)
|
||||
if (params.email) url.searchParams.append('email_address', params.email)
|
||||
if (params.candidate_ids) url.searchParams.append('candidate_ids', params.candidate_ids)
|
||||
return url.toString()
|
||||
},
|
||||
@@ -45,7 +45,7 @@ export const greenhouseListJobStagesTool: ToolConfig<
|
||||
|
||||
request: {
|
||||
url: (params: GreenhouseListJobStagesParams) => {
|
||||
const url = new URL(`https://harvest.greenhouse.io/v1/jobs/${params.jobId}/stages`)
|
||||
const url = new URL(`https://harvest.greenhouse.io/v1/jobs/${params.jobId.trim()}/stages`)
|
||||
if (params.per_page) url.searchParams.append('per_page', String(params.per_page))
|
||||
if (params.page) url.searchParams.append('page', String(params.page))
|
||||
return url.toString()
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user