Compare commits


11 Commits

Author SHA1 Message Date
Siddharth Ganesan
29dd509f46 v3.1 2025-11-11 20:17:34 -08:00
Siddharth Ganesan
c705d506c6 v3 2025-11-11 20:15:38 -08:00
Siddharth Ganesan
0eb294d44f v1 2025-11-11 19:26:50 -08:00
Siddharth Ganesan
33a4f06e8e template 2025-11-11 17:19:02 -08:00
waleed
db5591da89 rename blog to studio 2025-11-08 18:48:57 -08:00
Siddharth Ganesan
41431fc380 feat(helm): add copilot (#1833)
* Add helm for copilot

* Remove otel and log level

* Change repo name

* improvement(helm): enhance copilot chart with HA support and validation

* refactor(helm): consolidate copilot secrets and fix postgres volume mount
2025-11-08 18:48:57 -08:00
Waleed
4631f4982f improvement(code): add wand config and system prompt for python code generation, strip \n from stdout in JS/Python (#1862) 2025-11-08 18:48:57 -08:00
Waleed
914661e6cb improvement(ux): optimistic updates for envvars, custom tools, folder operations, workflow deletions. shared hook for connection tags & tag dropdown, fix for triggers not re-rendering on trigger selected (#1861) 2025-11-08 18:48:57 -08:00
Waleed
962017fc25 fix(build): remove mdx from transpilation (#1860) 2025-11-08 18:48:57 -08:00
waleed
4e9a0dee35 added scaffolding and authors for everyone's articles 2025-11-08 12:37:01 -08:00
waleed
e702a05993 feat(blog): fundraise blog 2025-11-08 12:37:01 -08:00
73 changed files with 3123 additions and 977 deletions

View File

@@ -14,9 +14,11 @@ import {
} from '@/components/ui/select'
import { Textarea } from '@/components/ui/textarea'
import { quickValidateEmail } from '@/lib/email/validation'
import { isHosted } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console/logger'
import { cn } from '@/lib/utils'
import { LegalLayout } from '@/app/(landing)/components'
import Footer from '@/app/(landing)/components/footer/footer'
import Nav from '@/app/(landing)/components/nav/nav'
import { soehne } from '@/app/fonts/soehne/soehne'
const logger = createLogger('CareersPage')
@@ -199,329 +201,340 @@ export default function CareersPage() {
}
return (
<LegalLayout title='Join Our Team'>
<div className={`${soehne.className} mx-auto max-w-2xl`}>
{/* Form Section */}
<section className='rounded-2xl border border-gray-200 bg-white p-6 shadow-sm sm:p-10'>
<h2 className='mb-2 font-medium text-2xl sm:text-3xl'>Apply Now</h2>
<p className='mb-8 text-gray-600 text-sm sm:text-base'>
Help us build the future of AI workflows
</p>
<main className={`${soehne.className} min-h-screen bg-white text-gray-900`}>
<Nav variant='landing' />
<form onSubmit={onSubmit} className='space-y-5'>
{/* Name and Email */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='name' className='font-medium text-sm'>
Full Name *
</Label>
<Input
id='name'
placeholder='John Doe'
value={name}
onChange={(e) => setName(e.target.value)}
className={cn(
showErrors &&
nameErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && nameErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{nameErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
{/* Content */}
<div className='px-4 pt-[60px] pb-[80px] sm:px-8 md:px-[44px]'>
<h1 className='mb-10 text-center font-bold text-4xl text-gray-900 md:text-5xl'>
Join Our Team
</h1>
<div className='space-y-2'>
<Label htmlFor='email' className='font-medium text-sm'>
Email *
</Label>
<Input
id='email'
type='email'
placeholder='john@example.com'
value={email}
onChange={(e) => setEmail(e.target.value)}
className={cn(
showErrors &&
emailErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && emailErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{emailErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Phone and Position */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='phone' className='font-medium text-sm'>
Phone Number
</Label>
<Input
id='phone'
type='tel'
placeholder='+1 (555) 123-4567'
value={phone}
onChange={(e) => setPhone(e.target.value)}
/>
</div>
<div className='space-y-2'>
<Label htmlFor='position' className='font-medium text-sm'>
Position of Interest *
</Label>
<Input
id='position'
placeholder='e.g. Full Stack Engineer, Product Designer'
value={position}
onChange={(e) => setPosition(e.target.value)}
className={cn(
showErrors &&
positionErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && positionErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{positionErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* LinkedIn and Portfolio */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='linkedin' className='font-medium text-sm'>
LinkedIn Profile
</Label>
<Input
id='linkedin'
placeholder='https://linkedin.com/in/yourprofile'
value={linkedin}
onChange={(e) => setLinkedin(e.target.value)}
className={cn(
showErrors &&
linkedinErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && linkedinErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{linkedinErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<div className='space-y-2'>
<Label htmlFor='portfolio' className='font-medium text-sm'>
Portfolio / Website
</Label>
<Input
id='portfolio'
placeholder='https://yourportfolio.com'
value={portfolio}
onChange={(e) => setPortfolio(e.target.value)}
className={cn(
showErrors &&
portfolioErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && portfolioErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{portfolioErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Experience and Location */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='experience' className='font-medium text-sm'>
Years of Experience *
</Label>
<Select value={experience} onValueChange={setExperience}>
<SelectTrigger
className={cn(
showErrors &&
experienceErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
>
<SelectValue placeholder='Select experience level' />
</SelectTrigger>
<SelectContent>
<SelectItem value='0-1'>0-1 years</SelectItem>
<SelectItem value='1-3'>1-3 years</SelectItem>
<SelectItem value='3-5'>3-5 years</SelectItem>
<SelectItem value='5-10'>5-10 years</SelectItem>
<SelectItem value='10+'>10+ years</SelectItem>
</SelectContent>
</Select>
{showErrors && experienceErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{experienceErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<div className='space-y-2'>
<Label htmlFor='location' className='font-medium text-sm'>
Location *
</Label>
<Input
id='location'
placeholder='e.g. San Francisco, CA'
value={location}
onChange={(e) => setLocation(e.target.value)}
className={cn(
showErrors &&
locationErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && locationErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{locationErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Message */}
<div className='space-y-2'>
<Label htmlFor='message' className='font-medium text-sm'>
Tell us about yourself *
</Label>
<Textarea
id='message'
placeholder='Tell us about your experience, what excites you about Sim, and why you would be a great fit for this role...'
className={cn(
'min-h-[140px]',
showErrors &&
messageErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
value={message}
onChange={(e) => setMessage(e.target.value)}
/>
<p className='mt-1.5 text-gray-500 text-xs'>Minimum 50 characters</p>
{showErrors && messageErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{messageErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
{/* Resume Upload */}
<div className='space-y-2'>
<Label htmlFor='resume' className='font-medium text-sm'>
Resume *
</Label>
<div className='relative'>
{resume ? (
<div className='flex items-center gap-2 rounded-md border border-input bg-background px-3 py-2'>
<span className='flex-1 truncate text-sm'>{resume.name}</span>
<button
type='button'
onClick={(e) => {
e.preventDefault()
setResume(null)
if (fileInputRef.current) {
fileInputRef.current.value = ''
}
}}
className='flex-shrink-0 text-muted-foreground transition-colors hover:text-foreground'
aria-label='Remove file'
>
<X className='h-4 w-4' />
</button>
</div>
) : (
<div className='mx-auto max-w-4xl'>
{/* Form Section */}
<section className='rounded-2xl border border-gray-200 bg-white p-6 shadow-sm sm:p-10'>
<form onSubmit={onSubmit} className='space-y-5'>
{/* Name and Email */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='name' className='font-medium text-sm'>
Full Name *
</Label>
<Input
id='resume'
type='file'
accept='.pdf,.doc,.docx'
onChange={handleFileChange}
ref={fileInputRef}
id='name'
placeholder='John Doe'
value={name}
onChange={(e) => setName(e.target.value)}
className={cn(
showErrors &&
resumeErrors.length > 0 &&
nameErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && nameErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{nameErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<div className='space-y-2'>
<Label htmlFor='email' className='font-medium text-sm'>
Email *
</Label>
<Input
id='email'
type='email'
placeholder='john@example.com'
value={email}
onChange={(e) => setEmail(e.target.value)}
className={cn(
showErrors &&
emailErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && emailErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{emailErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Phone and Position */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='phone' className='font-medium text-sm'>
Phone Number
</Label>
<Input
id='phone'
type='tel'
placeholder='+1 (555) 123-4567'
value={phone}
onChange={(e) => setPhone(e.target.value)}
/>
</div>
<div className='space-y-2'>
<Label htmlFor='position' className='font-medium text-sm'>
Position of Interest *
</Label>
<Input
id='position'
placeholder='e.g. Full Stack Engineer, Product Designer'
value={position}
onChange={(e) => setPosition(e.target.value)}
className={cn(
showErrors &&
positionErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && positionErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{positionErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* LinkedIn and Portfolio */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='linkedin' className='font-medium text-sm'>
LinkedIn Profile
</Label>
<Input
id='linkedin'
placeholder='https://linkedin.com/in/yourprofile'
value={linkedin}
onChange={(e) => setLinkedin(e.target.value)}
className={cn(
showErrors &&
linkedinErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && linkedinErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{linkedinErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<div className='space-y-2'>
<Label htmlFor='portfolio' className='font-medium text-sm'>
Portfolio / Website
</Label>
<Input
id='portfolio'
placeholder='https://yourportfolio.com'
value={portfolio}
onChange={(e) => setPortfolio(e.target.value)}
className={cn(
showErrors &&
portfolioErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && portfolioErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{portfolioErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Experience and Location */}
<div className='grid grid-cols-1 gap-4 sm:gap-6 md:grid-cols-2'>
<div className='space-y-2'>
<Label htmlFor='experience' className='font-medium text-sm'>
Years of Experience *
</Label>
<Select value={experience} onValueChange={setExperience}>
<SelectTrigger
className={cn(
showErrors &&
experienceErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
>
<SelectValue placeholder='Select experience level' />
</SelectTrigger>
<SelectContent>
<SelectItem value='0-1'>0-1 years</SelectItem>
<SelectItem value='1-3'>1-3 years</SelectItem>
<SelectItem value='3-5'>3-5 years</SelectItem>
<SelectItem value='5-10'>5-10 years</SelectItem>
<SelectItem value='10+'>10+ years</SelectItem>
</SelectContent>
</Select>
{showErrors && experienceErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{experienceErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<div className='space-y-2'>
<Label htmlFor='location' className='font-medium text-sm'>
Location *
</Label>
<Input
id='location'
placeholder='e.g. San Francisco, CA'
value={location}
onChange={(e) => setLocation(e.target.value)}
className={cn(
showErrors &&
locationErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
{showErrors && locationErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{locationErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
</div>
{/* Message */}
<div className='space-y-2'>
<Label htmlFor='message' className='font-medium text-sm'>
Tell us about yourself *
</Label>
<Textarea
id='message'
placeholder='Tell us about your experience, what excites you about Sim, and why you would be a great fit for this role...'
className={cn(
'min-h-[140px]',
showErrors &&
messageErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
value={message}
onChange={(e) => setMessage(e.target.value)}
/>
<p className='mt-1.5 text-gray-500 text-xs'>Minimum 50 characters</p>
{showErrors && messageErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{messageErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</div>
<p className='mt-1.5 text-gray-500 text-xs'>PDF or Word document, max 10MB</p>
{showErrors && resumeErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{resumeErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
{/* Resume Upload */}
<div className='space-y-2'>
<Label htmlFor='resume' className='font-medium text-sm'>
Resume *
</Label>
<div className='relative'>
{resume ? (
<div className='flex items-center gap-2 rounded-md border border-input bg-background px-3 py-2'>
<span className='flex-1 truncate text-sm'>{resume.name}</span>
<button
type='button'
onClick={(e) => {
e.preventDefault()
setResume(null)
if (fileInputRef.current) {
fileInputRef.current.value = ''
}
}}
className='flex-shrink-0 text-muted-foreground transition-colors hover:text-foreground'
aria-label='Remove file'
>
<X className='h-4 w-4' />
</button>
</div>
) : (
<Input
id='resume'
type='file'
accept='.pdf,.doc,.docx'
onChange={handleFileChange}
ref={fileInputRef}
className={cn(
showErrors &&
resumeErrors.length > 0 &&
'border-red-500 focus:border-red-500 focus:ring-red-100 focus-visible:ring-red-500'
)}
/>
)}
</div>
)}
</div>
{/* Submit Button */}
<div className='flex justify-end pt-2'>
<Button
type='submit'
disabled={isSubmitting || submitStatus === 'success'}
className='min-w-[200px] rounded-[10px] border border-[#6F3DFA] bg-gradient-to-b from-[#8357FF] to-[#6F3DFA] text-white shadow-[inset_0_2px_4px_0_#9B77FF] transition-all duration-300 hover:opacity-90 disabled:opacity-50'
size='lg'
>
{isSubmitting ? (
<>
<Loader2 className='mr-2 h-4 w-4 animate-spin' />
Submitting...
</>
) : submitStatus === 'success' ? (
'Submitted'
) : (
'Submit Application'
<p className='mt-1.5 text-gray-500 text-xs'>PDF or Word document, max 10MB</p>
{showErrors && resumeErrors.length > 0 && (
<div className='mt-1 space-y-1 text-red-400 text-xs'>
{resumeErrors.map((error, index) => (
<p key={index}>{error}</p>
))}
</div>
)}
</Button>
</div>
</form>
</section>
</div>
{/* Additional Info */}
<section className='mt-6 text-center text-gray-600 text-sm'>
<p>
Questions? Email us at{' '}
<a
href='mailto:careers@sim.ai'
className='font-medium text-gray-900 underline transition-colors hover:text-gray-700'
>
careers@sim.ai
</a>
</p>
</section>
{/* Submit Button */}
<div className='flex justify-end pt-2'>
<Button
type='submit'
disabled={isSubmitting || submitStatus === 'success'}
className='min-w-[200px] rounded-[10px] border border-[#6F3DFA] bg-gradient-to-b from-[#8357FF] to-[#6F3DFA] text-white shadow-[inset_0_2px_4px_0_#9B77FF] transition-all duration-300 hover:opacity-90 disabled:opacity-50'
size='lg'
>
{isSubmitting ? (
<>
<Loader2 className='mr-2 h-4 w-4 animate-spin' />
Submitting...
</>
) : submitStatus === 'success' ? (
'Submitted'
) : (
'Submit Application'
)}
</Button>
</div>
</form>
</section>
{/* Additional Info */}
<section className='mt-6 text-center text-gray-600 text-sm'>
<p>
Questions? Email us at{' '}
<a
href='mailto:careers@sim.ai'
className='font-medium text-gray-900 underline transition-colors hover:text-gray-700'
>
careers@sim.ai
</a>
</p>
</section>
</div>
</div>
</LegalLayout>
{/* Footer - Only for hosted instances */}
{isHosted && (
<div className='relative z-20'>
<Footer fullWidth={true} />
</div>
)}
</main>
)
}
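A condensed sketch of the careers page skeleton after this change, assuming the standalone Nav/Footer shell is the incoming side of the diff and the LegalLayout wrapper the outgoing one (the rendered compare view drops the +/- markers, so the two versions are interleaved above):

export default function CareersPage() {
  return (
    <main className={`${soehne.className} min-h-screen bg-white text-gray-900`}>
      <Nav variant='landing' />
      <div className='px-4 pt-[60px] pb-[80px] sm:px-8 md:px-[44px]'>
        <h1 className='mb-10 text-center font-bold text-4xl text-gray-900 md:text-5xl'>
          Join Our Team
        </h1>
        <div className='mx-auto max-w-4xl'>
          {/* application form and careers@sim.ai contact sections */}
        </div>
      </div>
      {/* Footer - Only for hosted instances */}
      {isHosted && (
        <div className='relative z-20'>
          <Footer fullWidth={true} />
        </div>
      )}
    </main>
  )
}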

View File

@@ -217,10 +217,10 @@ export default function Footer({ fullWidth = false }: FooterProps) {
Enterprise
</Link>
<Link
href='/blog'
href='/studio'
className='text-[14px] text-muted-foreground transition-colors hover:text-foreground'
>
Blog
Sim Studio
</Link>
<Link
href='/changelog'

View File

@@ -8,13 +8,14 @@ import { soehne } from '@/app/fonts/soehne/soehne'
interface LegalLayoutProps {
title: string
children: React.ReactNode
navVariant?: 'landing' | 'auth' | 'legal'
}
export default function LegalLayout({ title, children }: LegalLayoutProps) {
export default function LegalLayout({ title, children, navVariant = 'legal' }: LegalLayoutProps) {
return (
<main className={`${soehne.className} min-h-screen bg-white text-gray-900`}>
{/* Header - Nav handles all conditional logic */}
<Nav variant='legal' />
<Nav variant={navVariant} />
{/* Content */}
<div className='px-12 pt-[40px] pb-[40px]'>

View File

@@ -71,7 +71,7 @@ export default function Nav({ hideAuthButtons = false, variant = 'landing' }: Na
</li>
<li>
<Link
href='#pricing'
href='/?from=nav#pricing'
className='text-[16px] text-muted-foreground transition-colors hover:text-foreground'
scroll={true}
>
@@ -88,6 +88,14 @@ export default function Nav({ hideAuthButtons = false, variant = 'landing' }: Na
Enterprise
</button>
</li>
<li>
<Link
href='/careers'
className='text-[16px] text-muted-foreground transition-colors hover:text-foreground'
>
Careers
</Link>
</li>
<li>
<a
href='https://github.com/simstudioai/sim'

View File

@@ -48,8 +48,8 @@ export default async function Page({ params }: { params: Promise<{ slug: string
/>
<header className='mx-auto max-w-[1450px] px-6 pt-8 sm:px-8 sm:pt-12 md:px-12 md:pt-16'>
<div className='mb-6'>
<Link href='/blog' className='text-gray-600 text-sm hover:text-gray-900'>
Back to Blog
<Link href='/studio' className='text-gray-600 text-sm hover:text-gray-900'>
Back to Sim Studio
</Link>
</div>
<div className='flex flex-col gap-8 md:flex-row md:gap-12'>
@@ -133,7 +133,7 @@ export default async function Page({ params }: { params: Promise<{ slug: string
<h2 className='mb-4 font-medium text-[24px]'>Related posts</h2>
<div className='grid grid-cols-1 gap-6 sm:grid-cols-2 lg:grid-cols-3'>
{related.map((p) => (
<Link key={p.slug} href={`/blog/${p.slug}`} className='group'>
<Link key={p.slug} href={`/studio/${p.slug}`} className='group'>
<div className='overflow-hidden rounded-lg border border-gray-200'>
<Image
src={p.ogImage}

View File

@@ -20,7 +20,7 @@ export default async function AuthorPage({ params }: { params: Promise<{ id: str
'@context': 'https://schema.org',
'@type': 'Person',
name: author.name,
url: `https://sim.ai/blog/authors/${author.id}`,
url: `https://sim.ai/studio/authors/${author.id}`,
sameAs: author.url ? [author.url] : [],
image: author.avatarUrl,
}
@@ -44,7 +44,7 @@ export default async function AuthorPage({ params }: { params: Promise<{ id: str
</div>
<div className='grid grid-cols-1 gap-8 sm:grid-cols-2'>
{posts.map((p) => (
<Link key={p.slug} href={`/blog/${p.slug}`} className='group'>
<Link key={p.slug} href={`/studio/${p.slug}`} className='group'>
<div className='overflow-hidden rounded-lg border border-gray-200'>
<Image
src={p.ogImage}

View File

@@ -1,12 +1,12 @@
export default function Head() {
return (
<>
<link rel='canonical' href='https://sim.ai/blog' />
<link rel='canonical' href='https://sim.ai/studio' />
<link
rel='alternate'
type='application/rss+xml'
title='Sim Blog'
href='https://sim.ai/blog/rss.xml'
title='Sim Studio'
href='https://sim.ai/studio/rss.xml'
/>
</>
)

View File

@@ -1,6 +1,6 @@
import { Footer, Nav } from '@/app/(landing)/components'
export default function BlogLayout({ children }: { children: React.ReactNode }) {
export default function StudioLayout({ children }: { children: React.ReactNode }) {
const orgJsonLd = {
'@context': 'https://schema.org',
'@type': 'Organization',

View File

@@ -6,7 +6,7 @@ import { soehne } from '@/app/fonts/soehne/soehne'
export const revalidate = 3600
export default async function BlogIndex({
export default async function StudioIndex({
searchParams,
}: {
searchParams: Promise<{ page?: string; tag?: string }>
@@ -17,44 +17,46 @@ export default async function BlogIndex({
const all = await getAllPostMeta()
const filtered = tag ? all.filter((p) => p.tags.includes(tag)) : all
const totalPages = Math.max(1, Math.ceil(filtered.length / perPage))
const featured = pageNum === 1 ? filtered.find((p) => p.featured) || filtered[0] : null
const listBase = featured ? filtered.filter((p) => p.slug !== featured.slug) : filtered
const totalPages = Math.max(1, Math.ceil(listBase.length / perPage))
const start = (pageNum - 1) * perPage
const posts = filtered.slice(start, start + perPage)
const posts = listBase.slice(start, start + perPage)
// Tag filter chips are intentionally disabled for now.
// const tags = await getAllTags()
const blogJsonLd = {
const studioJsonLd = {
'@context': 'https://schema.org',
'@type': 'Blog',
name: 'Sim Blog',
url: 'https://sim.ai/blog',
name: 'Sim Studio',
url: 'https://sim.ai/studio',
description: 'Announcements, insights, and guides for building AI agent workflows.',
}
const [featured, ...rest] = posts
const rest = posts
return (
<main className={`${soehne.className} mx-auto max-w-[1200px] px-6 py-12 sm:px-8 md:px-12`}>
<script
type='application/ld+json'
dangerouslySetInnerHTML={{ __html: JSON.stringify(blogJsonLd) }}
dangerouslySetInnerHTML={{ __html: JSON.stringify(studioJsonLd) }}
/>
<h1 className='mb-3 font-medium text-[40px] leading-tight sm:text-[56px]'>The Sim Times</h1>
<h1 className='mb-3 font-medium text-[40px] leading-tight sm:text-[56px]'>Sim Studio</h1>
<p className='mb-10 text-[18px] text-gray-700'>
Announcements, insights, and guides for building AI agent workflows.
</p>
{/* Tag filter chips hidden until we have more posts */}
{/* <div className='mb-10 flex flex-wrap gap-3'>
<Link href='/blog' className={`rounded-full border px-3 py-1 text-sm ${!tag ? 'border-black bg-black text-white' : 'border-gray-300'}`}>All</Link>
<Link href='/studio' className={`rounded-full border px-3 py-1 text-sm ${!tag ? 'border-black bg-black text-white' : 'border-gray-300'}`}>All</Link>
{tags.map((t) => (
<Link key={t.tag} href={`/blog?tag=${encodeURIComponent(t.tag)}`} className={`rounded-full border px-3 py-1 text-sm ${tag === t.tag ? 'border-black bg-black text-white' : 'border-gray-300'}`}>
<Link key={t.tag} href={`/studio?tag=${encodeURIComponent(t.tag)}`} className={`rounded-full border px-3 py-1 text-sm ${tag === t.tag ? 'border-black bg-black text-white' : 'border-gray-300'}`}>
{t.tag} ({t.count})
</Link>
))}
</div> */}
{featured && (
<Link href={`/blog/${featured.slug}`} className='group mb-10 block'>
<Link href={`/studio/${featured.slug}`} className='group mb-10 block'>
<div className='overflow-hidden rounded-2xl border border-gray-200'>
<Image
src={featured.ogImage}
@@ -135,7 +137,7 @@ export default async function BlogIndex({
return (
<Link
key={p.slug}
href={`/blog/${p.slug}`}
href={`/studio/${p.slug}`}
className='group mb-6 inline-block w-full break-inside-avoid'
>
<div className='overflow-hidden rounded-xl border border-gray-200 transition-colors duration-300 hover:border-gray-300'>
@@ -199,7 +201,7 @@ export default async function BlogIndex({
<div className='mt-10 flex items-center justify-center gap-3'>
{pageNum > 1 && (
<Link
href={`/blog?page=${pageNum - 1}${tag ? `&tag=${encodeURIComponent(tag)}` : ''}`}
href={`/studio?page=${pageNum - 1}${tag ? `&tag=${encodeURIComponent(tag)}` : ''}`}
className='rounded border px-3 py-1 text-sm'
>
Previous
@@ -210,7 +212,7 @@ export default async function BlogIndex({
</span>
{pageNum < totalPages && (
<Link
href={`/blog?page=${pageNum + 1}${tag ? `&tag=${encodeURIComponent(tag)}` : ''}`}
href={`/studio?page=${pageNum + 1}${tag ? `&tag=${encodeURIComponent(tag)}` : ''}`}
className='rounded border px-3 py-1 text-sm'
>
Next
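The reworked index now pulls the featured post out of the list before computing totalPages, so the featured card no longer consumes a pagination slot. A worked sketch of the math, assuming perPage = 9 (the actual constant isn't shown in this hunk):

// 10 posts, one featured, perPage = 9 (assumed)
const perPage = 9
const pageNum = 1
const filtered = Array.from({ length: 10 }, (_, i) => ({ slug: `post-${i}`, featured: i === 0 }))

const featured = pageNum === 1 ? filtered.find((p) => p.featured) || filtered[0] : null
const listBase = featured ? filtered.filter((p) => p.slug !== featured.slug) : filtered

const totalPages = Math.max(1, Math.ceil(listBase.length / perPage)) // 1, not 2 as before
const posts = listBase.slice((pageNum - 1) * perPage, pageNum * perPage) // the 9 non-featured posts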

View File

@@ -11,7 +11,7 @@ export async function GET() {
const xml = `<?xml version="1.0" encoding="UTF-8" ?>
<rss version="2.0">
<channel>
<title>Sim Blog</title>
<title>Sim Studio</title>
<link>${site}</link>
<description>Announcements, insights, and guides for AI agent workflows.</description>
${items

View File

@@ -7,13 +7,13 @@ export default async function TagsIndex() {
<main className='mx-auto max-w-[900px] px-6 py-10 sm:px-8 md:px-12'>
<h1 className='mb-6 font-medium text-[32px] leading-tight'>Browse by tag</h1>
<div className='flex flex-wrap gap-3'>
<Link href='/blog' className='rounded-full border border-gray-300 px-3 py-1 text-sm'>
<Link href='/studio' className='rounded-full border border-gray-300 px-3 py-1 text-sm'>
All
</Link>
{tags.map((t) => (
<Link
key={t.tag}
href={`/blog?tag=${encodeURIComponent(t.tag)}`}
href={`/studio?tag=${encodeURIComponent(t.tag)}`}
className='rounded-full border border-gray-300 px-3 py-1 text-sm'
>
{t.tag} ({t.count})

View File

@@ -640,6 +640,18 @@ function escapeRegExp(string: string): string {
return string.replace(/[.*+?^${}()|[\]\\]/g, '\\$&')
}
/**
* Remove one trailing newline from stdout
* This handles the common case where print() or console.log() adds a trailing \n
* that users don't expect to see in the output
*/
function cleanStdout(stdout: string): string {
if (stdout.endsWith('\n')) {
return stdout.slice(0, -1)
}
return stdout
}
export async function POST(req: NextRequest) {
const requestId = generateRequestId()
const startTime = Date.now()
@@ -820,7 +832,7 @@ export async function POST(req: NextRequest) {
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout, executionTime },
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },
})
}
// Track prologue lines for error adjustment
@@ -884,7 +896,7 @@ export async function POST(req: NextRequest) {
return NextResponse.json({
success: true,
output: { result: e2bResult ?? null, stdout, executionTime },
output: { result: e2bResult ?? null, stdout: cleanStdout(stdout), executionTime },
})
}
@@ -948,7 +960,7 @@ export async function POST(req: NextRequest) {
return NextResponse.json({
success: true,
output: { result, stdout, executionTime },
output: { result, stdout: cleanStdout(stdout), executionTime },
})
} catch (error: any) {
const executionTime = Date.now() - startTime
@@ -981,7 +993,7 @@ export async function POST(req: NextRequest) {
error: userFriendlyErrorMessage,
output: {
result: null,
stdout,
stdout: cleanStdout(stdout),
executionTime,
},
// Include debug information in development or for debugging
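For reference, the new cleanStdout helper strips exactly one trailing newline, so blank lines a user prints deliberately survive. A condensed, self-contained sketch of its behavior:

// cleanStdout as defined above, condensed so this sketch stands alone
function cleanStdout(stdout: string): string {
  return stdout.endsWith('\n') ? stdout.slice(0, -1) : stdout
}

cleanStdout('hello\n')   // 'hello'
cleanStdout('hello\n\n') // 'hello\n' (only the final newline is removed)
cleanStdout('hello')     // 'hello' (unchanged)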

View File

@@ -20,7 +20,7 @@ export function ThemeProvider({ children, ...props }: ThemeProviderProps) {
pathname.startsWith('/careers') ||
pathname.startsWith('/changelog') ||
pathname.startsWith('/chat') ||
pathname.startsWith('/blog')
pathname.startsWith('/studio')
? 'light'
: undefined

View File

@@ -3,11 +3,14 @@
import { useCallback, useRef, useState } from 'react'
import clsx from 'clsx'
import { ChevronDown, RepeatIcon, SplitIcon } from 'lucide-react'
import { shallow } from 'zustand/shallow'
import { createLogger } from '@/lib/logs/console/logger'
import { extractFieldsFromSchema } from '@/lib/response-format'
import type { ConnectedBlock } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel-new/components/editor/hooks/use-block-connections'
import { useBlockOutputFields } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-block-output-fields'
import { getBlock } from '@/blocks/registry'
import { getTool } from '@/tools/utils'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import { FieldItem, type SchemaField, TREE_SPACING } from './components/field-item/field-item'
const logger = createLogger('ConnectionBlocks')
@@ -22,144 +25,6 @@ const TREE_STYLES = {
LINE_OFFSET: 4,
} as const
const RESERVED_KEYS = new Set(['type', 'description'])
/**
* Checks if a property is an object type
*/
const isObject = (prop: any): boolean => prop && typeof prop === 'object'
/**
* Extracts nested fields from array or object properties
*/
const extractChildFields = (prop: any): SchemaField[] | undefined => {
if (!isObject(prop)) return undefined
if (prop.properties && isObject(prop.properties)) {
return extractNestedFields(prop.properties)
}
if (prop.items?.properties && isObject(prop.items.properties)) {
return extractNestedFields(prop.items.properties)
}
if (!('type' in prop)) {
return extractNestedFields(prop)
}
if (prop.type === 'array') {
const itemDefs = Object.fromEntries(
Object.entries(prop).filter(([key]) => !RESERVED_KEYS.has(key))
)
if (Object.keys(itemDefs).length > 0) {
return extractNestedFields(itemDefs)
}
}
return undefined
}
/**
* Recursively extracts nested fields from output properties
*/
const extractNestedFields = (properties: Record<string, any>): SchemaField[] => {
return Object.entries(properties).map(([name, prop]) => {
const baseType = isObject(prop) && typeof prop.type === 'string' ? prop.type : 'string'
const type = isObject(prop) && !('type' in prop) ? 'object' : baseType
return {
name,
type,
description: isObject(prop) ? prop.description : undefined,
children: extractChildFields(prop),
}
})
}
/**
* Gets tool outputs for a block's operation
*/
const getToolOutputs = (blockConfig: any, connection: ConnectedBlock): Record<string, any> => {
if (!blockConfig?.tools?.config?.tool || !connection.operation) return {}
try {
const toolId = blockConfig.tools.config.tool({ operation: connection.operation })
if (!toolId) return {}
const toolConfig = getTool(toolId)
return toolConfig?.outputs || {}
} catch {
return {}
}
}
/**
* Creates a schema field from an output definition
*/
const createFieldFromOutput = (
name: string,
output: any,
responseFormatFields?: SchemaField[]
): SchemaField => {
const hasExplicitType = isObject(output) && typeof output.type === 'string'
const type = hasExplicitType ? output.type : isObject(output) ? 'object' : 'string'
const field: SchemaField = {
name,
type,
description: isObject(output) && 'description' in output ? output.description : undefined,
}
if (name === 'data' && responseFormatFields && responseFormatFields.length > 0) {
field.children = responseFormatFields
} else {
field.children = extractChildFields(output)
}
return field
}
/**
* Builds complete field list for a connection, combining base outputs and responseFormat
*/
const buildConnectionFields = (connection: ConnectedBlock): SchemaField[] => {
const blockConfig = getBlock(connection.type)
if (!blockConfig && (connection.type === 'loop' || connection.type === 'parallel')) {
return [
{
name: 'results',
type: 'array',
description: 'Array of results from the loop/parallel execution',
},
]
}
const toolOutputs = getToolOutputs(blockConfig, connection)
const baseOutputs =
Object.keys(toolOutputs).length > 0
? toolOutputs
: connection.outputs || blockConfig?.outputs || {}
const responseFormatFields = extractFieldsFromSchema(connection.responseFormat)
if (responseFormatFields.length > 0 && Object.keys(baseOutputs).length === 0) {
return responseFormatFields
}
if (Object.keys(baseOutputs).length === 0) {
return []
}
return Object.entries(baseOutputs).map(([name, output]) =>
createFieldFromOutput(
name,
output,
responseFormatFields.length > 0 ? responseFormatFields : undefined
)
)
}
/**
* Calculates total height of visible nested fields recursively
*/
@@ -192,6 +57,125 @@ const calculateFieldsHeight = (
return totalHeight
}
interface ConnectionItemProps {
connection: ConnectedBlock
isExpanded: boolean
onToggleExpand: (connectionId: string) => void
isFieldExpanded: (connectionId: string, fieldPath: string) => boolean
onConnectionDragStart: (e: React.DragEvent, connection: ConnectedBlock) => void
renderFieldTree: (
fields: SchemaField[],
parentPath: string,
level: number,
connection: ConnectedBlock
) => React.ReactNode
connectionRef: (el: HTMLDivElement | null) => void
mergedSubBlocks: Record<string, any>
sourceBlock: { triggerMode?: boolean } | undefined
}
/**
* Individual connection item component that uses the hook
*/
function ConnectionItem({
connection,
isExpanded,
onToggleExpand,
isFieldExpanded,
onConnectionDragStart,
renderFieldTree,
connectionRef,
mergedSubBlocks,
sourceBlock,
}: ConnectionItemProps) {
const blockConfig = getBlock(connection.type)
const fields = useBlockOutputFields({
blockId: connection.id,
blockType: connection.type,
mergedSubBlocks,
responseFormat: connection.responseFormat,
operation: connection.operation,
triggerMode: sourceBlock?.triggerMode,
})
const hasFields = fields.length > 0
let Icon = blockConfig?.icon
let bgColor = blockConfig?.bgColor || '#6B7280'
if (!blockConfig) {
if (connection.type === 'loop') {
Icon = RepeatIcon as typeof Icon
bgColor = '#2FB3FF'
} else if (connection.type === 'parallel') {
Icon = SplitIcon as typeof Icon
bgColor = '#FEE12B'
}
}
return (
<div className='mb-[2px] last:mb-0' ref={connectionRef}>
<div
draggable
onDragStart={(e) => onConnectionDragStart(e, connection)}
className={clsx(
'group flex h-[25px] cursor-grab items-center gap-[8px] rounded-[8px] px-[5.5px] text-[14px] hover:bg-[#2C2C2C] active:cursor-grabbing dark:hover:bg-[#2C2C2C]',
hasFields && 'cursor-pointer'
)}
onClick={() => hasFields && onToggleExpand(connection.id)}
>
<div
className='relative flex h-[16px] w-[16px] flex-shrink-0 items-center justify-center overflow-hidden rounded-[4px]'
style={{ backgroundColor: bgColor }}
>
{Icon && (
<Icon
className={clsx(
'text-white transition-transform duration-200',
hasFields && 'group-hover:scale-110',
'!h-[10px] !w-[10px]'
)}
/>
)}
</div>
<span
className={clsx(
'truncate font-medium',
'text-[#AEAEAE] group-hover:text-[#E6E6E6] dark:text-[#AEAEAE] dark:group-hover:text-[#E6E6E6]'
)}
>
{connection.name}
</span>
{hasFields && (
<ChevronDown
className={clsx(
'h-3.5 w-3.5 flex-shrink-0 transition-transform',
'text-[#AEAEAE] group-hover:text-[#E6E6E6] dark:text-[#AEAEAE] dark:group-hover:text-[#E6E6E6]',
isExpanded && 'rotate-180'
)}
/>
)}
</div>
{isExpanded && hasFields && (
<div className='relative'>
<div
className='pointer-events-none absolute'
style={{
left: `${TREE_SPACING.VERTICAL_LINE_LEFT_OFFSET}px`,
top: `${TREE_STYLES.LINE_OFFSET}px`,
width: '1px',
height: `${calculateFieldsHeight(fields, '', connection.id, isFieldExpanded) - TREE_STYLES.LINE_OFFSET * 2}px`,
background: TREE_STYLES.LINE_COLOR,
}}
/>
{renderFieldTree(fields, '', 0, connection)}
</div>
)}
</div>
)
}
/**
* Connection blocks component that displays incoming connections with their schemas
*/
@@ -201,6 +185,31 @@ export function ConnectionBlocks({ connections, currentBlockId }: ConnectionBloc
const scrollContainerRef = useRef<HTMLDivElement>(null)
const connectionRefs = useRef<Map<string, HTMLDivElement>>(new Map())
const { blocks } = useWorkflowStore(
(state) => ({
blocks: state.blocks,
}),
shallow
)
const workflowId = useWorkflowRegistry((state) => state.activeWorkflowId)
const workflowSubBlockValues = useSubBlockStore((state) =>
workflowId ? (state.workflowValues[workflowId] ?? {}) : {}
)
const getMergedSubBlocks = useCallback(
(sourceBlockId: string): Record<string, any> => {
const base = blocks[sourceBlockId]?.subBlocks || {}
const live = workflowSubBlockValues?.[sourceBlockId] || {}
const merged: Record<string, any> = { ...base }
for (const [subId, liveVal] of Object.entries(live)) {
merged[subId] = { ...(base[subId] || {}), value: liveVal }
}
return merged
},
[blocks, workflowSubBlockValues]
)
const toggleConnectionExpansion = useCallback((connectionId: string) => {
setExpandedConnections((prev) => {
const newSet = new Set(prev)
@@ -327,94 +336,28 @@ export function ConnectionBlocks({ connections, currentBlockId }: ConnectionBloc
return (
<div ref={scrollContainerRef} className='space-y-[2px]'>
{connections.map((connection) => {
const blockConfig = getBlock(connection.type)
const isExpanded = expandedConnections.has(connection.id)
const fields = buildConnectionFields(connection)
const hasFields = fields.length > 0
let Icon = blockConfig?.icon
let bgColor = blockConfig?.bgColor || '#6B7280'
if (!blockConfig) {
if (connection.type === 'loop') {
Icon = RepeatIcon as typeof Icon
bgColor = '#2FB3FF'
} else if (connection.type === 'parallel') {
Icon = SplitIcon as typeof Icon
bgColor = '#FEE12B'
}
}
const mergedSubBlocks = getMergedSubBlocks(connection.id)
const sourceBlock = blocks[connection.id]
return (
<div
<ConnectionItem
key={connection.id}
className='mb-[2px] last:mb-0'
ref={(el) => {
connection={connection}
isExpanded={expandedConnections.has(connection.id)}
onToggleExpand={toggleConnectionExpansion}
isFieldExpanded={isFieldExpanded}
onConnectionDragStart={handleConnectionDragStart}
renderFieldTree={renderFieldTree}
connectionRef={(el) => {
if (el) {
connectionRefs.current.set(connection.id, el)
} else {
connectionRefs.current.delete(connection.id)
}
}}
>
<div
draggable
onDragStart={(e) => handleConnectionDragStart(e, connection)}
className={clsx(
'group flex h-[25px] cursor-grab items-center gap-[8px] rounded-[8px] px-[5.5px] text-[14px] hover:bg-[#2C2C2C] active:cursor-grabbing dark:hover:bg-[#2C2C2C]',
hasFields && 'cursor-pointer'
)}
onClick={() => hasFields && toggleConnectionExpansion(connection.id)}
>
<div
className='relative flex h-[16px] w-[16px] flex-shrink-0 items-center justify-center overflow-hidden rounded-[4px]'
style={{ backgroundColor: bgColor }}
>
{Icon && (
<Icon
className={clsx(
'text-white transition-transform duration-200',
hasFields && 'group-hover:scale-110',
'!h-[10px] !w-[10px]'
)}
/>
)}
</div>
<span
className={clsx(
'truncate font-medium',
'text-[#AEAEAE] group-hover:text-[#E6E6E6] dark:text-[#AEAEAE] dark:group-hover:text-[#E6E6E6]'
)}
>
{connection.name}
</span>
{hasFields && (
<ChevronDown
className={clsx(
'h-3.5 w-3.5 flex-shrink-0 transition-transform',
'text-[#AEAEAE] group-hover:text-[#E6E6E6] dark:text-[#AEAEAE] dark:group-hover:text-[#E6E6E6]',
isExpanded && 'rotate-180'
)}
/>
)}
</div>
{isExpanded && hasFields && (
<div className='relative'>
<div
className='pointer-events-none absolute'
style={{
left: `${TREE_SPACING.VERTICAL_LINE_LEFT_OFFSET}px`,
top: `${TREE_STYLES.LINE_OFFSET}px`,
width: '1px',
height: `${calculateFieldsHeight(fields, '', connection.id, isFieldExpanded) - TREE_STYLES.LINE_OFFSET * 2}px`,
background: TREE_STYLES.LINE_COLOR,
}}
/>
{renderFieldTree(fields, '', 0, connection)}
</div>
)}
</div>
mergedSubBlocks={mergedSubBlocks}
sourceBlock={sourceBlock}
/>
)
})}
</div>
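The new getMergedSubBlocks helper overlays live subblock-store values onto the persisted block state, so the computed output fields see unsaved edits. A small sketch of the merge (the example values are illustrative):

const base: Record<string, any> = { code: { id: 'code', type: 'code', value: 'print(1)' } }
const live: Record<string, any> = { code: 'print(2)', language: 'python' }

const merged: Record<string, any> = { ...base }
for (const [subId, liveVal] of Object.entries(live)) {
  merged[subId] = { ...(base[subId] || {}), value: liveVal }
}
// merged.code     -> { id: 'code', type: 'code', value: 'print(2)' }
// merged.language -> { value: 'python' } (keys present only in live still appear)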

View File

@@ -210,7 +210,6 @@ export function Code({
const accessiblePrefixes = useAccessibleReferencePrefixes(blockId)
const emitTagSelection = useTagSelection(blockId, subBlockId)
const [languageValue] = useSubBlockValue<string>(blockId, 'language')
const [remoteExecution] = useSubBlockValue<boolean>(blockId, 'remoteExecution')
// Derived state
const effectiveLanguage = (languageValue as 'javascript' | 'python' | 'json') || language
@@ -244,14 +243,14 @@ export function Code({
}, [generationType])
const dynamicPlaceholder = useMemo(() => {
if (remoteExecution && languageValue === CodeLanguage.Python) {
if (languageValue === CodeLanguage.Python) {
return 'Write Python...'
}
return placeholder
}, [remoteExecution, languageValue, placeholder])
}, [languageValue, placeholder])
const dynamicWandConfig = useMemo(() => {
if (remoteExecution && languageValue === CodeLanguage.Python) {
if (languageValue === CodeLanguage.Python) {
return {
...wandConfig,
prompt: PYTHON_AI_PROMPT,
@@ -259,11 +258,11 @@ export function Code({
}
}
return wandConfig
}, [wandConfig, remoteExecution, languageValue])
}, [wandConfig, languageValue])
// AI code generation integration
const wandHook = useWand({
wandConfig: wandConfig || { enabled: false, prompt: '' },
wandConfig: dynamicWandConfig || { enabled: false, prompt: '' },
currentValue: code,
onStreamStart: () => handleStreamStartRef.current?.(),
onStreamChunk: (chunk: string) => handleStreamChunkRef.current?.(chunk),
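// Net effect of this hunk, as I read it: dynamicWandConfig (which swaps in
// the Python prompt when the selected language is Python) is now actually
// passed to useWand; previously the memo was computed but the raw wandConfig
// was handed to the hook, so the Python-specific prompt never took effect.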

View File

@@ -493,7 +493,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
(metric: { name: string }) => `${normalizedBlockName}.${metric.name.toLowerCase()}`
)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs)
const outputPaths = getBlockOutputPaths(sourceBlock.type, mergedSubBlocks)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (sourceBlock.type === 'variables') {
@@ -515,7 +515,11 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (schemaFields.length > 0) {
blockTags = schemaFields.map((field) => `${normalizedBlockName}.${field.name}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(
sourceBlock.type,
mergedSubBlocks,
sourceBlock.triggerMode
)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (!blockConfig.outputs || Object.keys(blockConfig.outputs).length === 0) {
@@ -573,21 +577,19 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (dynamicOutputs.length > 0) {
blockTags = dynamicOutputs.map((path) => `${normalizedBlockName}.${path}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(sourceBlock.type, mergedSubBlocks, true)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (sourceBlock.type === 'approval') {
// For approval block, use dynamic outputs based on inputFormat
const dynamicOutputs = getBlockOutputPaths(sourceBlock.type, mergedSubBlocks)
// If it's a self-reference, only show url (available immediately)
const isSelfReference = activeSourceBlockId === blockId
if (dynamicOutputs.length > 0) {
const allTags = dynamicOutputs.map((path) => `${normalizedBlockName}.${path}`)
blockTags = isSelfReference ? allTags.filter((tag) => tag.endsWith('.url')) : allTags
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(sourceBlock.type, mergedSubBlocks)
const allTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
blockTags = isSelfReference ? allTags.filter((tag) => tag.endsWith('.url')) : allTags
}
@@ -601,7 +603,11 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (toolOutputPaths.length > 0) {
blockTags = toolOutputPaths.map((path) => `${normalizedBlockName}.${path}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(
sourceBlock.type,
mergedSubBlocks,
sourceBlock.triggerMode
)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
}
@@ -845,7 +851,7 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
(metric: { name: string }) => `${normalizedBlockName}.${metric.name.toLowerCase()}`
)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs)
const outputPaths = getBlockOutputPaths(accessibleBlock.type, mergedSubBlocks)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (accessibleBlock.type === 'variables') {
@@ -867,7 +873,11 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (schemaFields.length > 0) {
blockTags = schemaFields.map((field) => `${normalizedBlockName}.${field.name}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(
accessibleBlock.type,
mergedSubBlocks,
accessibleBlock.triggerMode
)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (!blockConfig.outputs || Object.keys(blockConfig.outputs).length === 0) {
@@ -879,21 +889,19 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (dynamicOutputs.length > 0) {
blockTags = dynamicOutputs.map((path) => `${normalizedBlockName}.${path}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(accessibleBlock.type, mergedSubBlocks, true)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
} else if (accessibleBlock.type === 'approval') {
// For approval block, use dynamic outputs based on inputFormat
const dynamicOutputs = getBlockOutputPaths(accessibleBlock.type, mergedSubBlocks)
// If it's a self-reference, only show url (available immediately)
const isSelfReference = accessibleBlockId === blockId
if (dynamicOutputs.length > 0) {
const allTags = dynamicOutputs.map((path) => `${normalizedBlockName}.${path}`)
blockTags = isSelfReference ? allTags.filter((tag) => tag.endsWith('.url')) : allTags
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(accessibleBlock.type, mergedSubBlocks)
const allTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
blockTags = isSelfReference ? allTags.filter((tag) => tag.endsWith('.url')) : allTags
}
@@ -907,7 +915,11 @@ export const TagDropdown: React.FC<TagDropdownProps> = ({
if (toolOutputPaths.length > 0) {
blockTags = toolOutputPaths.map((path) => `${normalizedBlockName}.${path}`)
} else {
const outputPaths = generateOutputPaths(blockConfig.outputs || {})
const outputPaths = getBlockOutputPaths(
accessibleBlock.type,
mergedSubBlocks,
accessibleBlock.triggerMode
)
blockTags = outputPaths.map((path) => `${normalizedBlockName}.${path}`)
}
}
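// Recurring pattern in this file: every static fallback through
// generateOutputPaths(blockConfig.outputs) is replaced by
// getBlockOutputPaths(type, mergedSubBlocks[, triggerMode]), so fallback
// tags now also reflect the block's current (possibly unsaved) subblock values.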

View File

@@ -36,7 +36,25 @@ export function useEditorSubblockLayout(
const blocks = useWorkflowStore.getState().blocks || {}
const mergedMap = mergeSubblockState(blocks, activeWorkflowId || undefined, blockId)
const mergedState = mergedMap ? mergedMap[blockId] : undefined
stateToUse = mergedState?.subBlocks || {}
const mergedSubBlocks = mergedState?.subBlocks || {}
stateToUse = Object.keys(mergedSubBlocks).reduce(
(acc, key) => {
const value =
blockSubBlockValues[key] !== undefined
? blockSubBlockValues[key]
: (mergedSubBlocks[key]?.value ?? null)
acc[key] = { value }
return acc
},
{} as Record<string, { value: unknown }>
)
Object.keys(blockSubBlockValues).forEach((key) => {
if (!(key in stateToUse)) {
stateToUse[key] = { value: blockSubBlockValues[key] }
}
})
// Filter visible blocks and those that meet their conditions
const visibleSubBlocks = (config.subBlocks || []).filter((block) => {
@@ -59,12 +77,12 @@ export function useEditorSubblockLayout(
if (block.mode === 'advanced' && !displayAdvancedMode) return false
if (block.mode === 'trigger') {
// Show trigger mode blocks only when in trigger mode
return displayTriggerMode
if (!displayTriggerMode) return false
}
}
// When in trigger mode, hide blocks that don't have mode: 'trigger'
if (displayTriggerMode) {
if (displayTriggerMode && block.mode !== 'trigger') {
return false
}
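The rewritten guards change one behavior: a trigger-mode subblock no longer returns early when trigger mode is on, so it still passes through the remaining visibility checks. A condensed sketch of the new logic:

function isVisible(block: { mode?: string }, displayTriggerMode: boolean): boolean {
  if (block.mode === 'trigger' && !displayTriggerMode) return false // trigger blocks hidden outside trigger mode
  if (displayTriggerMode && block.mode !== 'trigger') return false // non-trigger blocks hidden in trigger mode
  return true // fall through to the remaining condition checks in the real hook
}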

View File

@@ -0,0 +1,373 @@
'use client'
import { useMemo } from 'react'
import { extractFieldsFromSchema } from '@/lib/response-format'
import { getBlockOutputPaths, getBlockOutputs } from '@/lib/workflows/block-outputs'
import { TRIGGER_TYPES } from '@/lib/workflows/triggers'
import type { SchemaField } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel-new/components/editor/components/connection-blocks/components/field-item/field-item'
import { getBlock } from '@/blocks'
import type { BlockConfig } from '@/blocks/types'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { getTool } from '@/tools/utils'
const RESERVED_KEYS = new Set(['type', 'description'])
/**
* Checks if a property is an object type
*/
const isObject = (prop: any): boolean => prop && typeof prop === 'object'
/**
* Gets a subblock value from the store
*/
const getSubBlockValue = (blockId: string, property: string): any => {
return useSubBlockStore.getState().getValue(blockId, property)
}
/**
* Generates output paths for a tool-based block
*/
const generateToolOutputPaths = (blockConfig: BlockConfig, operation: string): string[] => {
if (!blockConfig?.tools?.config?.tool) return []
try {
const toolId = blockConfig.tools.config.tool({ operation })
if (!toolId) return []
const toolConfig = getTool(toolId)
if (!toolConfig?.outputs) return []
return generateOutputPaths(toolConfig.outputs)
} catch {
return []
}
}
/**
* Recursively generates all output paths from an outputs schema
*/
const generateOutputPaths = (outputs: Record<string, any>, prefix = ''): string[] => {
const paths: string[] = []
for (const [key, value] of Object.entries(outputs)) {
const currentPath = prefix ? `${prefix}.${key}` : key
if (typeof value === 'string') {
paths.push(currentPath)
} else if (typeof value === 'object' && value !== null) {
if ('type' in value && typeof value.type === 'string') {
paths.push(currentPath)
// Handle nested objects and arrays
if (value.type === 'object' && value.properties) {
paths.push(...generateOutputPaths(value.properties, currentPath))
} else if (value.type === 'array' && value.items?.properties) {
paths.push(...generateOutputPaths(value.items.properties, currentPath))
} else if (
value.type === 'array' &&
value.items &&
typeof value.items === 'object' &&
!('type' in value.items)
) {
paths.push(...generateOutputPaths(value.items, currentPath))
}
} else {
const subPaths = generateOutputPaths(value, currentPath)
paths.push(...subPaths)
}
} else {
paths.push(currentPath)
}
}
return paths
}
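// Illustrative example: given an outputs schema like
//   { content: { type: 'string' },
//     files: { type: 'array', items: { properties: { name: { type: 'string' } } } } }
// generateOutputPaths returns ['content', 'files', 'files.name'].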
/**
* Extracts nested fields from array or object properties
*/
const extractChildFields = (prop: any): SchemaField[] | undefined => {
if (!isObject(prop)) return undefined
if (prop.properties && isObject(prop.properties)) {
return extractNestedFields(prop.properties)
}
if (prop.items?.properties && isObject(prop.items.properties)) {
return extractNestedFields(prop.items.properties)
}
if (!('type' in prop)) {
return extractNestedFields(prop)
}
if (prop.type === 'array') {
const itemDefs = Object.fromEntries(
Object.entries(prop).filter(([key]) => !RESERVED_KEYS.has(key))
)
if (Object.keys(itemDefs).length > 0) {
return extractNestedFields(itemDefs)
}
}
return undefined
}
/**
* Recursively extracts nested fields from output properties
*/
const extractNestedFields = (properties: Record<string, any>): SchemaField[] => {
return Object.entries(properties).map(([name, prop]) => {
const baseType = isObject(prop) && typeof prop.type === 'string' ? prop.type : 'string'
const type = isObject(prop) && !('type' in prop) ? 'object' : baseType
return {
name,
type,
description: isObject(prop) ? prop.description : undefined,
children: extractChildFields(prop),
}
})
}
/**
* Creates a schema field from an output definition
*/
const createFieldFromOutput = (
name: string,
output: any,
responseFormatFields?: SchemaField[]
): SchemaField => {
const hasExplicitType = isObject(output) && typeof output.type === 'string'
const type = hasExplicitType ? output.type : isObject(output) ? 'object' : 'string'
const field: SchemaField = {
name,
type,
description: isObject(output) && 'description' in output ? output.description : undefined,
}
if (name === 'data' && responseFormatFields && responseFormatFields.length > 0) {
field.children = responseFormatFields
} else {
field.children = extractChildFields(output)
}
return field
}
/**
* Gets tool outputs for a block's operation
*/
const getToolOutputs = (
blockConfig: BlockConfig | null,
operation?: string
): Record<string, any> => {
if (!blockConfig?.tools?.config?.tool || !operation) return {}
try {
const toolId = blockConfig.tools.config.tool({ operation })
if (!toolId) return {}
const toolConfig = getTool(toolId)
return toolConfig?.outputs || {}
} catch {
return {}
}
}
interface UseBlockOutputFieldsParams {
blockId: string
blockType: string
mergedSubBlocks?: Record<string, any>
responseFormat?: any
operation?: string
triggerMode?: boolean
}
/**
* Hook that generates consistent block output fields using the same logic as tag-dropdown
* Returns SchemaField[] format for use in connection-blocks component
*/
export function useBlockOutputFields({
blockId,
blockType,
mergedSubBlocks,
responseFormat,
operation,
triggerMode,
}: UseBlockOutputFieldsParams): SchemaField[] {
return useMemo(() => {
const blockConfig = getBlock(blockType)
// Handle loop/parallel blocks without config
if (!blockConfig && (blockType === 'loop' || blockType === 'parallel')) {
return [
{
name: 'results',
type: 'array',
description: 'Array of results from the loop/parallel execution',
},
]
}
if (!blockConfig) {
return []
}
// Handle evaluator blocks - use metrics if available
if (blockType === 'evaluator') {
const metricsValue = mergedSubBlocks?.metrics?.value ?? getSubBlockValue(blockId, 'metrics')
if (metricsValue && Array.isArray(metricsValue) && metricsValue.length > 0) {
const validMetrics = metricsValue.filter((metric: { name?: string }) => metric?.name)
return validMetrics.map((metric: { name: string }) => ({
name: metric.name.toLowerCase(),
type: 'number',
description: `Metric: ${metric.name}`,
}))
}
// Fall through to use blockConfig.outputs
}
// Handle variables blocks - use variable assignments if available
if (blockType === 'variables') {
const variablesValue =
mergedSubBlocks?.variables?.value ?? getSubBlockValue(blockId, 'variables')
if (variablesValue && Array.isArray(variablesValue) && variablesValue.length > 0) {
const validAssignments = variablesValue.filter((assignment: { variableName?: string }) =>
assignment?.variableName?.trim()
)
return validAssignments.map((assignment: { variableName: string }) => ({
name: assignment.variableName.trim(),
type: 'any',
description: `Variable: ${assignment.variableName}`,
}))
}
// Fall through to empty or default
return []
}
// Get base outputs using getBlockOutputs (handles triggers, starter, approval, etc.)
let baseOutputs: Record<string, any> = {}
if (blockConfig.category === 'triggers' || blockType === 'starter') {
// Use getBlockOutputPaths to get dynamic outputs, then reconstruct the structure
const outputPaths = getBlockOutputPaths(blockType, mergedSubBlocks, triggerMode)
if (outputPaths.length > 0) {
// Reconstruct outputs structure from paths
// This is a simplified approach - we'll use the paths to build the structure
baseOutputs = getBlockOutputs(blockType, mergedSubBlocks, triggerMode)
} else if (blockType === 'starter') {
const startWorkflowValue = mergedSubBlocks?.startWorkflow?.value
if (startWorkflowValue === 'chat') {
baseOutputs = {
input: { type: 'string', description: 'User message' },
conversationId: { type: 'string', description: 'Conversation ID' },
files: { type: 'files', description: 'Uploaded files' },
}
} else {
const inputFormatValue = mergedSubBlocks?.inputFormat?.value
if (inputFormatValue && Array.isArray(inputFormatValue) && inputFormatValue.length > 0) {
baseOutputs = {}
inputFormatValue.forEach((field: { name?: string; type?: string }) => {
if (field.name && field.name.trim() !== '') {
baseOutputs[field.name] = {
type: field.type || 'string',
description: `Field from input format`,
}
}
})
}
}
} else if (blockType === TRIGGER_TYPES.GENERIC_WEBHOOK) {
// Generic webhook returns the whole payload
baseOutputs = {}
} else {
baseOutputs = {}
}
} else if (triggerMode && blockConfig.triggers?.enabled) {
// Trigger mode enabled
const dynamicOutputs = getBlockOutputPaths(blockType, mergedSubBlocks, true)
if (dynamicOutputs.length > 0) {
baseOutputs = getBlockOutputs(blockType, mergedSubBlocks, true)
} else {
baseOutputs = blockConfig.outputs || {}
}
} else if (blockType === 'approval') {
// Approval block uses dynamic outputs from inputFormat
baseOutputs = getBlockOutputs(blockType, mergedSubBlocks)
} else {
// For tool-based blocks, try to get tool outputs first
const operationValue =
operation ?? mergedSubBlocks?.operation?.value ?? getSubBlockValue(blockId, 'operation')
const toolOutputs = operationValue ? getToolOutputs(blockConfig, operationValue) : {}
if (Object.keys(toolOutputs).length > 0) {
baseOutputs = toolOutputs
} else {
// Use getBlockOutputs which handles inputFormat merging
baseOutputs = getBlockOutputs(blockType, mergedSubBlocks, triggerMode)
}
}
// Handle responseFormat
const responseFormatFields = responseFormat ? extractFieldsFromSchema(responseFormat) : []
// If responseFormat exists and has fields, merge with base outputs
if (responseFormatFields.length > 0) {
// If base outputs is empty, use responseFormat fields directly
if (Object.keys(baseOutputs).length === 0) {
return responseFormatFields.map((field) => ({
name: field.name,
type: field.type,
description: field.description,
children: undefined, // ResponseFormat fields are flat
}))
}
// Otherwise, merge: responseFormat takes precedence for 'data' field
const fields: SchemaField[] = []
const responseFormatFieldNames = new Set(responseFormatFields.map((f) => f.name))
// Add base outputs, replacing 'data' with responseFormat fields if present
for (const [name, output] of Object.entries(baseOutputs)) {
if (name === 'data' && responseFormatFields.length > 0) {
fields.push(
createFieldFromOutput(
name,
output,
responseFormatFields.map((f) => ({
name: f.name,
type: f.type,
description: f.description,
}))
)
)
} else if (!responseFormatFieldNames.has(name)) {
fields.push(createFieldFromOutput(name, output))
}
}
// Add responseFormat fields that aren't in base outputs
for (const field of responseFormatFields) {
if (!baseOutputs[field.name]) {
fields.push({
name: field.name,
type: field.type,
description: field.description,
})
}
}
return fields
}
// No responseFormat, just use base outputs
if (Object.keys(baseOutputs).length === 0) {
return []
}
return Object.entries(baseOutputs).map(([name, output]) => createFieldFromOutput(name, output))
}, [blockId, blockType, mergedSubBlocks, responseFormat, operation, triggerMode])
}

View File

@@ -47,7 +47,6 @@ export function FolderItem({
}: FolderItemProps) {
const { expandedFolders, toggleExpanded, updateFolderAPI, deleteFolder } = useFolderStore()
const [showDeleteDialog, setShowDeleteDialog] = useState(false)
const [isDeleting, setIsDeleting] = useState(false)
const [isDragging, setIsDragging] = useState(false)
const [isEditing, setIsEditing] = useState(false)
const [editValue, setEditValue] = useState(folder.name)
@@ -169,14 +168,12 @@ export function FolderItem({
}
const confirmDelete = async () => {
setIsDeleting(true)
setShowDeleteDialog(false)
try {
await deleteFolder(folder.id, workspaceId)
setShowDeleteDialog(false)
} catch (error) {
logger.error('Failed to delete folder:', { error })
} finally {
setIsDeleting(false)
}
}
@@ -233,15 +230,12 @@ export function FolderItem({
</AlertDialogHeader>
<AlertDialogFooter className='flex'>
<AlertDialogCancel className='h-9 w-full rounded-[8px]' disabled={isDeleting}>
Cancel
</AlertDialogCancel>
<AlertDialogCancel className='h-9 w-full rounded-[8px]'>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={confirmDelete}
disabled={isDeleting}
className='h-9 w-full rounded-[8px] bg-red-500 text-white transition-all duration-200 hover:bg-red-600 dark:bg-red-500 dark:hover:bg-red-600'
>
{isDeleting ? 'Deleting...' : 'Delete'}
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>
@@ -354,15 +348,12 @@ export function FolderItem({
</AlertDialogHeader>
<AlertDialogFooter className='flex'>
<AlertDialogCancel className='h-9 w-full rounded-[8px]' disabled={isDeleting}>
Cancel
</AlertDialogCancel>
<AlertDialogCancel className='h-9 w-full rounded-[8px]'>Cancel</AlertDialogCancel>
<AlertDialogAction
onClick={confirmDelete}
disabled={isDeleting}
className='h-9 w-full rounded-[8px] bg-red-500 text-white transition-all duration-200 hover:bg-red-600 dark:bg-red-500 dark:hover:bg-red-600'
>
{isDeleting ? 'Deleting...' : 'Delete'}
Delete
</AlertDialogAction>
</AlertDialogFooter>
</AlertDialogContent>

View File

@@ -301,15 +301,24 @@ function useDragHandlers(
if (workflowIdsData) {
const workflowIds = JSON.parse(workflowIdsData) as string[]
try {
// Update workflows sequentially to avoid race conditions
for (const workflowId of workflowIds) {
await updateWorkflow(workflowId, { folderId: targetFolderId })
}
logger.info(logMessage || `Moved ${workflowIds.length} workflow(s)`)
} catch (error) {
logger.error('Failed to move workflows:', error)
}
Promise.allSettled(
workflowIds.map((workflowId) => updateWorkflow(workflowId, { folderId: targetFolderId }))
)
.then((results) => {
const failures = results.filter((r) => r.status === 'rejected')
if (failures.length === 0) {
logger.info(logMessage || `Moved ${workflowIds.length} workflow(s)`)
} else if (failures.length === workflowIds.length) {
logger.error('Failed to move all workflows')
} else {
const successCount = results.length - failures.length
logger.warn(`Partially moved workflows: ${successCount}/${workflowIds.length}`)
}
})
.catch((error) => {
logger.error('Unexpected error moving workflows:', error)
})
}
// Handle folder drops

View File

@@ -83,7 +83,7 @@ export const CareersConfirmationEmail = ({
documentation
</a>{' '}
to learn more about what we're building, or check out our{' '}
<a href={`${baseUrl}/blog`} style={{ color: '#802FFF', textDecoration: 'none' }}>
<a href={`${baseUrl}/studio`} style={{ color: '#802FFF', textDecoration: 'none' }}>
blog
</a>{' '}
for the latest updates.

View File

@@ -0,0 +1,7 @@
{
"id": "adam",
"name": "Adam Gough",
"url": "https://x.com/adamgough",
"xHandle": "adamgough",
"avatarUrl": "/studio/authors/adam.png"
}

View File

@@ -3,5 +3,5 @@
"name": "Emir Karabeg",
"url": "https://x.com/karabegemir",
"xHandle": "karabegemir",
"avatarUrl": "/blog/authors/emir.png"
"avatarUrl": "/studio/authors/emir.png"
}

View File

@@ -0,0 +1,7 @@
{
"id": "sid",
"name": "Siddharth Ganesan",
"url": "https://x.com/sidganesan",
"xHandle": "sidganesan",
"avatarUrl": "/studio/authors/sid.png"
}

View File

@@ -0,0 +1,7 @@
{
"id": "vik",
"name": "Vikhyath Mondreti",
"url": "https://x.com/vikhyathm",
"xHandle": "vikhyathm",
"avatarUrl": "/studio/authors/vik.png"
}

View File

@@ -3,5 +3,5 @@
"name": "Waleed Latif",
"url": "https://x.com/typingwala",
"xHandle": "typingwala",
"avatarUrl": "/blog/authors/waleed.png"
"avatarUrl": "/studio/authors/waleed.png"
}

View File

@@ -0,0 +1,101 @@
---
slug: copilot
title: 'Inside Sim Copilot — architecture, benchmarks, and how it fits'
description: 'A technical overview of Sim Copilot: the architecture behind it, our early benchmarks, and how Copilot integrates with agentic workflows in Sim.'
date: 2025-11-08
updated: 2025-11-08
authors:
- sid
readingTime: 7
tags: [Copilot, AI Assistant, Benchmarks, Architecture, Sim]
ogImage: /studio/copilot/cover.png
ogAlt: 'Sim Copilot technical overview'
about: ['AI Assistants', 'Agentic Workflows', 'Retrieval Augmented Generation']
timeRequired: PT7M
canonical: https://sim.ai/studio/copilot
featured: false
draft: false
---
> This is a technical deep-dive scaffold for Sim Copilot. We'll keep updating it as we publish more results and open up additional capabilities.
## TL;DR
- Copilot is a context-aware assistant embedded into the Sim editor.
- It has first-class access (with user approval) to workflows, blocks, logs, and docs.
- The system is retrieval-centric with strong guardrails and deterministic execution paths.
## Architecture at a glance
1. Intent understanding
- Lightweight classifier + instruction parser directs requests into tools.
2. Context assembly
- Indexed sources (workflows, blocks, logs, docs) with semantic and lexical signals.
- Safety filters for scope + permission checks.
3. Tooling and actions
- Read-only tools (explain, compare, search), propose-changes tools, and execution tools.
4. Response synthesis
- Deterministic templates for diffs, plans, and explanations.
5. Human-in-the-loop
- All writes gated behind explicit user approval.
```mermaid
flowchart LR
U[User] --> I(Intent)
I --> C(Context Builder)
C -->|RAG| R[Retriever]
R --> T(Tools)
T --> S(Response Synth)
S --> U
```
## Retrieval and grounding
- Sources: workspace workflows, block metadata, execution logs, and product docs.
- Indexing: hybrid scoring (BM25 + embeddings) with recency decay and per-source caps (sketched below).
- Normalization: chunking with stable anchors so diffs remain line-referential.
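As a rough illustration of the idea (not the production scorer; the weights, half-life, and `Candidate` shape are assumptions):
```typescript
// Illustrative only: blend a lexical score with an embedding score, then decay by age.
interface Candidate {
  source: 'workflow' | 'block' | 'log' | 'doc'
  bm25: number // lexical relevance, assumed normalized to [0, 1]
  cosine: number // embedding similarity, assumed normalized to [0, 1]
  ageDays: number // days since the chunk was last updated
}

const LEXICAL_WEIGHT = 0.4 // assumed blend weight
const HALF_LIFE_DAYS = 30 // assumed recency half-life

function hybridScore(c: Candidate): number {
  const blended = LEXICAL_WEIGHT * c.bm25 + (1 - LEXICAL_WEIGHT) * c.cosine
  const recency = 2 ** (-c.ageDays / HALF_LIFE_DAYS) // halves every HALF_LIFE_DAYS
  return blended * recency
}

// Per-source cap: keep at most `cap` candidates from any one source.
function applySourceCaps(ranked: Candidate[], cap = 5): Candidate[] {
  const counts = new Map<Candidate['source'], number>()
  return ranked.filter((c) => {
    const n = counts.get(c.source) ?? 0
    if (n >= cap) return false
    counts.set(c.source, n + 1)
    return true
  })
}
```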
## Early benchmarks (scaffold)
> Numbers below are placeholders for the structure; we'll replace them with full runs.
| Task | Top-1 Retrieval@K | Edit Accuracy | Time (p50) |
| ----------------------------------- | -----------------:| ------------: | ---------: |
| Explain a workflow block | 92% | 88% | 1.2s |
| Propose a safe fix for an error | 78% | 70% | 2.1s |
| Generate a new block configuration | 74% | 65% | 2.6s |
| Find relevant execution logs | 90% | 84% | 1.4s |
Measurement notes:
- Retrieval@K: correctness of the top candidate chunk for a labeled query.
- Edit Accuracy: human-validated acceptance rate for proposed changes.
- Time: server-side latency (excludes model cold-start).
## Guardrails and safety
- Scope enforcement: actions limited to the open workspace with explicit user triggers.
- Sensitive data policies and redaction in logs.
- Proposal mode: diffs are reviewed and applied only on user approval.
## How Copilot fits into Sim
- Ineditor assistant for building and editing workflows.
- Shortcut to documentation and examples with live context from your canvas.
- Bridge to evaluation: Copilot can set up test runs and compare outputs side-by-side.
- Works with the same permissions model you already use in Sim.
## Roadmap (high-level)
- Multi-turn plans with subtasks and rollback.
- Deeper evaluation harness and dataset management.
- First-party tool plugins for common blocks and providers.
## Repro and transparency
- We'll publish a benchmark harness and anonymized evaluation sets.
- Expect a detailed methodology post as we widen the beta.
— Sid @ Sim

View File

@@ -0,0 +1,97 @@
---
slug: emcn
title: 'Introducing Emcn — Sim’s new design system'
description: Emcn is the heart of our new design language at Sim. Here's the scaffolding of the system—principles, tokens, components, and roadmap—as we prepare the full launch.
date: 2025-11-08
updated: 2025-11-08
authors:
- emir
readingTime: 6
tags: [Design, Emcn, UI, UX, Components, Sim]
ogImage: /studio/emcn/cover.png
ogAlt: 'Emcn design system cover'
about: ['Design Systems', 'Component Libraries', 'Design Tokens', 'Accessibility']
timeRequired: PT6M
canonical: https://sim.ai/studio/emcn
featured: false
draft: false
---
> This post is the scaffolding for Emcn, our new design system. We'll fill it in as we publish the full documentation and component gallery.
![Emcn cover placeholder](/studio/emcn/cover.png)
## What is Emcn?
Emcn is the design system that powers Sim's product and brand. It aims to give us:
- Consistent, accessible UI across web surfaces
- A fast path from Figma to code with strongly-typed tokens
- A composable component library that scales with product complexity
## Principles
1. Opinionated but flexible
2. Accessible by default (WCAG AA+)
3. Strongly-typed, themeable tokens (light/dark + brand accents)
4. Composable components over one-off variants
5. Performance first (minimal runtime, zero layout shift)
## Foundations (Tokens)
- Color: semantic palettes (bg, fg, muted, accent, destructive) with on-colors
- Typography: scale + weights mapped to roles (display, title, body, code)
- Spacing: 2/4 grid, container and gutter rules
- Radius: component tiers (base, interactive, card, sheet)
- Shadows: subtle elevation scale for surfaces and overlays
- Motion: duration/easing tokens for affordances (not decoration)
## Components (Initial Set)
- Primitives: Button, Input, Select, Checkbox, Radio, Switch, Slider, Badge, Tooltip
- Navigation: NavBar, SideBar, Tabs, Breadcrumbs
- Feedback: Toast, Banner, Alert, Dialog, Drawer, Popover
- Layout: Grid, Stack, Container, Card, Sheet
- Content: CodeBlock, Markdown, Table, EmptyState
> Each component will include: anatomy, a11y contract, variants/slots, and code examples.
## Theming
- Light + Dark, with brand accent tokens
- Per-workspace theming hooks for enterprise deployments
- SSR-safe color mode with no flash (hydration-safe)
## Accessibility
- Focus outlines and target sizes audited
- Color contrast tracked at token level
- Keyboard and screen reader interactions defined per component
## Tooling
- Tokens exported as TypeScript + CSS variables (a rough sketch follows below)
- Figma library mapped 1:1 to code components
- Lint rules for token usage and a11y checks
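As a hypothetical sketch of what strongly-typed tokens could look like (names and values here are placeholders, not the shipped Emcn tokens):
```typescript
// Placeholder token shape: a TypeScript object mirrored by CSS variables.
export const tokens = {
  color: {
    bg: 'var(--emcn-bg)',
    fg: 'var(--emcn-fg)',
    muted: 'var(--emcn-muted)',
    accent: 'var(--emcn-accent)',
    destructive: 'var(--emcn-destructive)',
  },
  radius: { base: '4px', interactive: '6px', card: '12px', sheet: '16px' },
  motion: { fast: '120ms', base: '200ms', easing: 'cubic-bezier(0.2, 0, 0, 1)' },
} as const

// Lint rules can enforce token usage through these derived types.
export type ColorToken = keyof typeof tokens.color // 'bg' | 'fg' | 'muted' | ...
```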
## Roadmap
- v0: Foundations + Core components (internal)
- v1: Public docs and examples site
- v1.x: Data display, advanced forms, charts bridge
## FAQ
- What does “Emcn” mean?
A short, crisp name we liked—easy to type and remember.
- Will Emcn be open-sourced?
We plan to share the foundations and many components as part of our commitment to open source.
## We're hiring
We're hiring designers and engineers who care deeply about craft and DX. If you want to help shape Emcn and Sim's product, we'd love to talk.
— Team Sim

View File

@@ -0,0 +1,196 @@
---
slug: executor
title: 'Inside the Sim Executor - DAG Based Execution with Native Parallelism'
description: 'How we built a DAG-based execution engine with native parallel processing, intelligent edge routing, and stateful pause/resume capabilities'
date: 2025-11-08
updated: 2025-11-12
authors:
- sid
readingTime: 12
tags: [Executor, Architecture, DAG, Orchestration]
ogImage: /studio/copilot/cover.png
ogAlt: 'Sim Executor technical overview'
about: ['Execution', 'Workflow Orchestration']
timeRequired: PT12M
canonical: https://sim.ai/studio/executor
featured: true
draft: false
---
Modern workflows aren't just linear automations anymore. They involve a variety of APIs and services, loop over a model's output, pause for human decisions, and resume hours or days later exactly where they left off.
We designed the Sim executor to make these patterns feel natural. This post shares the architecture we ended up with, the challenges we ran into along the way, and what it enables for teams building agentic systems at scale.
## Laying the Foundation
There's a single guiding philosophy we use when designing the executor: workflows should read like the work you intend to do, not like the mess of cables behind a TV.
The complexity of wiring and plumbing should be abstracted away, and building a performant workflow end to end should be easy, modular, and seamless.
That's why the Sim executor serves as both an orchestrator and a translation layer, turning user-friendly workflow representations into an executable DAG behind the scenes.
## Core engine
At its heart, the executor figures out which blocks can run, runs them, then repeats. It sounds simple in theory, but can become surprisingly complex when you factor in conditional routing, nested loops, and true parallelism.
### Compiling Workflows to Graphs
Before execution starts, we compile the visual workflow into a directed acyclic graph (DAG). Every block becomes a node and every connection becomes an edge. Loops and parallel subflows expand into more complex structures (sentinel nodes for loops, branch-indexed nodes for parallels) that preserve the DAG property while enabling iteration and concurrency.
This upfront compilation pays off immediately: the entire topology is concretely defined before the first block ever executes.
### The Execution Queue
Once we have the DAG, execution becomes event-driven. We maintain a ready queue: nodes whose dependencies are all satisfied. When a node completes, we remove its outgoing edges from downstream nodes' incoming edge sets. Any node that hits zero incoming edges goes straight into the queue. At its core, it's a topological sort.
The key difference here from traditional workflow execution approaches: we don't wait for a "layer" to finish. If three nodes in the queue are independent, we launch all three immediately and let the runtime handle concurrency.
### Dependency Resolution
In our earlier prototypes, we scanned the connection array after every block execution to see what became ready. However, as the number of nodes and edges grows, performance takes a hit.
The DAG flips that model. Each node tracks its own incoming edges in a set. When a dependency completes, we remove one element from the set. When the set hits zero, the node is ready. No scanning, no filtering, no repeated checks.
This optimization compounds when you have many parallel branches or deeply nested structures. Every node knows its own readiness without asking the rest of the graph.
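A minimal sketch of this pattern, with illustrative types rather than the executor's real ones:
```typescript
// Each node owns a set of unsatisfied dependencies; completion is one set deletion away.
interface DagNode {
  id: string
  incoming: Set<string> // IDs of dependencies that haven't completed yet
  outgoing: string[] // IDs of downstream nodes
}

function onNodeComplete(done: DagNode, nodes: Map<string, DagNode>, readyQueue: DagNode[]) {
  for (const targetId of done.outgoing) {
    const target = nodes.get(targetId)
    if (!target) continue
    target.incoming.delete(done.id) // O(1) set removal, no scan of the connection array
    if (target.incoming.size === 0) readyQueue.push(target) // all deps satisfied: ready
  }
}
```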
### Variable Resolution
Blocks reference data from different sources: loop items (`<loop.iteration>`, `<loop.item>`), parallel branch indices (`<parallel.index>`), upstream block outputs (`<blockId.output.content>`), workflow variables (`<workflow.variableName>`), and environment variables (`${API_KEY}`). The resolver tries each scope in order—loop first, then parallel, then workflow, then environment, then block outputs. Inner scopes shadow outer ones, matching standard scoping semantics. This makes variables predictable: the context you're in determines what you see, without name collision or manual prefixes.
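A simplified sketch of that ordered lookup (the scope names mirror the references above; everything else is illustrative):
```typescript
type Scope = Record<string, unknown>

// Try loop, then parallel, then workflow, then environment, then block outputs.
// The first scope that defines the name wins, so inner scopes shadow outer ones.
function resolveReference(
  name: string,
  scopes: { loop?: Scope; parallel?: Scope; workflow?: Scope; env?: Scope; blocks?: Scope }
): unknown {
  for (const scope of [scopes.loop, scopes.parallel, scopes.workflow, scopes.env, scopes.blocks]) {
    if (scope && name in scope) return scope[name]
  }
  return undefined
}
```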
### Multiple Triggers and Selective Compilation
A workflow can have multiple entry points. Webhooks listen at different paths, schedules run on different cadences, and some triggers can fire from the UI. Each represents a valid starting point, but only one matters for any given execution.
The DAG builder handles this through selective compilation. When a workflow executes, we receive a trigger block ID. The builder starts from that node and builds only the reachable subgraph. Blocks that aren't downstream from the trigger never make it into the DAG.
This keeps execution focused. A workflow with five different webhook triggers doesn't compile all five paths every time. The topology adapts to the context automatically.
### Executing from the Client
The executor lives server-side. Users build workflows in the client. As they iterate and test, they need to see block inputs and outputs, watch execution progress in real time, and understand which paths the workflow takes.
Polling adds latency. Duplicating execution logic client-side creates drift. We needed a way to stream execution state as it happens.
The executor emits events at key execution points—block starts, completions, streaming content, errors. These events flow through SSE to connected clients. The client reconstructs execution state from the stream, rendering logs and outputs as blocks complete.
## Parallelism
When a workflow fans out to call multiple APIs, compare outputs from different models, or process items independently, those branches should run at the same time. Not interleaved, not sequentially—actually concurrent.
Most workflow platforms handle branches differently. Some execute them one after another (n8n's v1 mode completes branch 1, then branch 2, then branch 3). Others interleave execution (run the first node of each branch, then the second node of each branch). Both approaches are deterministic, but neither gives you true parallelism.
The workarounds typically involve triggering separate sub-workflows with "wait for completion" disabled, then manually collecting results. This works, but it means coordinating execution state across multiple workflow instances, handling failures independently, and stitching outputs back together.
### How we approach it
The ready queue gives us parallelism by default. When a parallel block executes, it expands into branch-indexed nodes in the DAG. Each branch is a separate copy of the blocks inside the parallel scope, indexed by branch number.
All entry nodes across all branches enter the ready queue simultaneously. The executor launches them concurrently—they're independent nodes with satisfied dependencies. As each branch progresses, its downstream nodes become ready and execute. The parallel orchestrator tracks completion by counting terminal nodes across all branches.
When all branches finish, we aggregate their outputs in branch order and continue. No coordination overhead, no manual result collection—just concurrent execution with deterministic aggregation.
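Conceptually, the aggregation step reduces to collecting outputs keyed by branch index; a toy sketch:
```typescript
// Branch outputs arrive in completion order but are emitted in branch order.
function aggregateBranches(outputs: Map<number, unknown>, branchCount: number): unknown[] {
  const results: unknown[] = []
  for (let branch = 0; branch < branchCount; branch++) {
    results.push(outputs.get(branch)) // an undefined slot would indicate a failed branch
  }
  return results
}
```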
### What this enables
A workflow that calls fifty different APIs processes them concurrently. Parallel model comparisons return results as they stream in, not after the slowest one finishes.
The DAG doesn't distinguish between "parallel branches" and "independent blocks that happen to be ready at the same time." Both execute concurrently. Parallelism simply emerges from workflow structure.
### Parallel subflows for cleaner authoring
For repetitive parallel work, we added parallel subflows. Instead of duplicating blocks visually for each branch on the canvas, you define a single subflow and configure the parallel block to run it N times or once per item in a collection.
Behind the scenes, this expands to the same branch-indexed DAG structure. The executor doesn't distinguish between manually authored parallel branches and subflow-generated ones—they both become independent nodes that execute concurrently. Same execution model, cleaner authoring experience.
## Loops
### How loops compile to DAGs
Loops present a challenge for DAGs: graphs are acyclic, but loops repeat. We handle this by expanding loops into sentinel nodes during compilation.
![Loop sentinel nodes](/studio/executor/loop-sentinels.png)
*Loops expand into sentinel start and end nodes. The backward edge only activates when the loop continues, preserving the DAG's acyclic property.*
A loop is bookended by two nodes: a sentinel start and a sentinel end. The sentinel start activates the first blocks inside the loop. When terminal blocks complete, they route to the sentinel end. The sentinel end evaluates the loop condition and returns either "continue" (which routes back to the start) or "exit" (which activates blocks after the loop).
The backward edge from end to start doesn't count as a dependency initially—it only activates if the loop continues. This preserves the DAG property while enabling iteration.
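In sketch form, the sentinel end is a small guarded decision (illustrative; the real evaluation also covers forEach collections and while conditions):
```typescript
interface LoopScope {
  iteration: number // current iteration, starting at 0
  maxIterations: number // bound for a counted loop
}

// Returns which edge set to activate: the backward edge or the exit edges.
function evaluateSentinelEnd(scope: LoopScope): 'continue' | 'exit' {
  scope.iteration += 1
  return scope.iteration < scope.maxIterations ? 'continue' : 'exit'
}
```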
### Iteration state and variable scoping
When a loop continues, the executor doesn't re-execute blocks from scratch. It clears their execution state (marking them as not-yet-executed) and restores their incoming edges, so they become ready for the next pass. Loop scope updates: iteration increments, the next item loads (for forEach), outputs from the previous iteration move to the aggregated results.
Blocks inside the loop access loop variables through the resolver chain. `<loop.iteration>` resolves before checking block outputs or workflow variables, so iteration context shadows outer scopes. This makes variable access predictable—you always get the current loop state.
## Conditions and Routers
Workflows branch based on runtime decisions. A condition block evaluates expressions and routes to different paths. A router block lets an AI model choose which path to take based on context. Both are core to building adaptive workflows.
### LLM-driven routing
Router blocks represent a modern pattern in workflow orchestration. Instead of hardcoding logic with if/else chains, you describe the options and let a language model decide. The model sees the conversation context, evaluates which path makes sense, and returns a selection.
The executor treats this selection as a routing decision. Each outgoing edge from a router carries metadata about which target block it represents. When the router completes, it returns the chosen block's ID. The edge manager activates only the edge matching that ID; all other edges deactivate.
This makes AI-driven routing deterministic and traceable. You can inspect the execution log and see exactly which path the model chose, why (from the model's reasoning), and which alternatives were pruned.
### Edge selection and path pruning
When a condition or router executes, it evaluates its logic and returns a single selection. The edge manager checks each outgoing edge to see if its label matches the selection. The matching edge activates; the rest deactivate.
![Edge activation and pruning](/studio/executor/edge-pruning.png)
*When a condition selects one path, the chosen edge activates while unselected paths deactivate recursively, preventing unreachable blocks from executing.*
Deactivation cascades. If an edge deactivates, the executor recursively deactivates all edges downstream from its target—unless that target has other active incoming edges. This automatic pruning prevents unreachable blocks from ever entering the ready queue.
The benefit: wasted work drops to zero. Paths that won't execute don't consume resources, don't wait in the queue, and don't clutter execution logs. The DAG reflects what actually ran, not what could have run.
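A rough sketch of the cascade, using illustrative types that separate "may still fire" edges from already-satisfied ones:
```typescript
// Illustrative pruning cascade. A node whose remaining active edges all deactivate,
// and which never received a satisfied edge, is unreachable and prunes onward.
interface PrunableNode {
  id: string
  activeIncoming: Set<string> // sources that may still complete
  satisfiedIncoming: number // sources that already completed
  outgoing: string[]
}

function pruneEdge(sourceId: string, targetId: string, nodes: Map<string, PrunableNode>) {
  const target = nodes.get(targetId)
  if (!target) return
  target.activeIncoming.delete(sourceId) // this dependency will never fire
  const unreachable = target.activeIncoming.size === 0 && target.satisfiedIncoming === 0
  if (unreachable) {
    for (const nextId of target.outgoing) pruneEdge(target.id, nextId, nodes)
  }
}
```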
### Convergence and rejoining paths
Workflows often diverge and reconverge. Multiple condition branches might lead to different processing steps, then merge at a common aggregation block. The executor handles this through edge counting.
When paths converge, the target block has multiple incoming edges—one from each upstream path. The edge manager tracks which edges activate. If a condition prunes one branch, that edge deactivates, and the target's incoming edge count decreases. The target becomes ready only when all remaining active incoming edges complete.
This works for complex topologies: nested conditions, routers feeding into other routers, parallel branches that reconverge after different amounts of work. The dependency tracking adapts automatically.
## Human in the loop
AI workflows aren't fully automated. They pause for approvals, wait for human feedback, or stop to let someone review model output before continuing. These pauses can happen anywhere—mid-branch, inside a loop, across multiple parallel paths at once.
### Pause detection and state capture
When a block returns pause metadata, the executor stops processing its outgoing edges. Instead of continuing to downstream blocks, it captures the current execution state: every block output, every loop iteration, every parallel branch's progress, every routing decision, and the exact topology of remaining dependencies in the DAG.
Each pause point gets a unique context ID that encodes its position. A pause inside a loop at iteration 5 gets a different ID than the same block at iteration 6. A pause in parallel branch 3 gets a different ID than branch 4. This makes resume targeting precise—you can resume specific pause points independently.
The executor supports multiple simultaneous pauses. If three parallel branches each hit an approval block, all three pause, each with its own context ID. The execution returns with all three pause points and their resume links. Resuming any one triggers continuation from that specific point.
### Snapshot serialization
The snapshot captures everything needed to resume. Block states, execution logs, loop and parallel scopes, routing decisions, workflow variables—all serialize to JSON. The critical piece: DAG incoming edges. We save which dependencies each node still has outstanding.
When you serialize the DAG's edge state, you're freezing the exact moment in time when execution paused. This includes partially completed loops (iteration 7 of 100), in-flight parallel branches (12 of 50 complete), and conditional paths already pruned.
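Shape-wise, a snapshot looks something like the following; the field names are assumptions for illustration, not Sim's persisted schema:
```typescript
// Everything here must survive a JSON round-trip, so edge sets are stored as arrays
// on save and rebuilt into sets on resume.
interface ExecutionSnapshot {
  blockOutputs: Record<string, unknown> // outputs of completed blocks
  remainingEdges: Record<string, string[]> // nodeId -> unsatisfied dependency IDs
  loopScopes: Record<string, { iteration: number; item?: unknown; results: unknown[] }>
  parallelScopes: Record<string, { completedBranches: number; totalBranches: number }>
  routingDecisions: Record<string, string> // routerId -> chosen target block ID
  pauseContexts: string[] // context IDs awaiting resume
  workflowVariables: Record<string, unknown>
}
```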
### Resume and continuation
Resuming rebuilds the DAG, restores the snapshot state, and queues the resume trigger nodes. The executor marks already-executed blocks to prevent re-execution, restores incoming edges to reflect remaining dependencies, and continues from where it stopped.
If multiple pause points exist, each can resume independently. The first resume doesn't invalidate the others—each pause has its own trigger node in the DAG. When all pauses resume, the workflow continues normally, collecting outputs from each resumed branch.
### Coordination and atomicity
The executor uses a queue lock to prevent race conditions. When a node completes with pause metadata, we acquire the lock before checking for pauses. This ensures that multiple branches pausing simultaneously don't interfere with each other's state capture.
The lock also prevents a resumed node from racing with other executing nodes. When a resume trigger fires, it enters the queue like any other node. The ready queue pattern handles coordination—resumed nodes execute when their dependencies clear, just like nodes in the original execution.
### Example
![Iterative agent refinement with human feedback](/studio/executor/hitl-loop.png)
*A common pattern: agent generates output, pauses for human review, router decides pass/fail based on feedback, saves to workflow variable, and loop continues until approved.*
A while loop runs an agent with previous feedback as context. The agent's output goes to a human-in-the-loop block, which pauses execution and sends a notification. The user reviews the output and provides feedback via the resume link.
When resumed, the feedback flows to a router that evaluates whether the output passes or needs revision. If it fails, the router saves the feedback to a workflow variable and routes back to continue the loop. The agent receives this feedback on the next iteration and tries again. If it passes, the router exits the loop and continues downstream.
The while loop's condition checks the workflow variable. As long as the status is "fail," the loop continues. When the router sets it to "pass," the loop exits. Each piece—loops, pause/resume, routing, variables—composes without glue because they're all first-class executor concepts.
Multiple reviewers approving different branches works the same way. Each branch pauses independently, reviewers approve in any order, and execution continues as each approval comes in. The parallel orchestrator collects the results when all branches complete.
— Sid @ Sim

View File

@@ -0,0 +1,109 @@
---
slug: multiplayer
title: 'Realtime and Multiplayer in Sim — how it works under the hood'
description: "A technical look at Sim's realtime and multiplayer architecture: presence, collaboration, conflict resolution, and scale."
date: 2025-11-08
updated: 2025-11-08
authors:
- vik
readingTime: 8
tags: [Multiplayer, Realtime, Collaboration, CRDT, WebSockets, Sim]
ogImage: /studio/multiplayer/cover.png
ogAlt: 'Sim multiplayer architecture overview'
about: ['Realtime Systems', 'Operational Transform / CRDT', 'Collaboration']
timeRequired: PT8M
canonical: https://sim.ai/studio/multiplayer
featured: false
draft: false
---
> This post outlines the key pieces of Sim's realtime and multiplayer stack. It's a scaffold we'll keep enriching with diagrams, traces, and code snippets as we publish more details.
## Goals
- Low-latency collaboration on shared canvases and workflows
- Deterministic conflict resolution and auditability
- Scales from small teams to enterprise orgs
## Highlevel architecture
1. Transport
- Secure WebSocket channels per workspace/session with fallbacks.
2. Session and presence
- Authenticated connections; presence, cursors, and selections broadcast on a lightweight channel.
3. State model
- Canonical workflow state stored in a durable DB; clients hold ephemeral working copies.
4. Conflict resolution
- Operation-based CRDT/OT hybrid for block changes; idempotent ops with causal timestamps.
5. Persistence and snapshots
- Append-only operation log; periodic compaction into snapshots for fast loads.
6. Observability
- Per-op metrics, client RTT, and reconnection traces; room health dashboards.
```mermaid
sequenceDiagram
participant C1 as Client A
participant C2 as Client B
participant GW as Realtime Gateway
participant S as State Service
C1->>GW: connect(ws, auth)
C2->>GW: connect(ws, auth)
C1->>GW: op(block.update)
GW->>S: validate & persist(op)
S-->>GW: ack(op, version)
GW-->>C1: ack(op)
GW-->>C2: broadcast(op)
```
## Presence and awareness
- Presence channel carries user metadata (name, color), cursor positions, and ephemeral selections.
- Heartbeats + timeouts remove stale presence; reconnects recover presence state.
## Operations and versions
- Every mutating action becomes an operation with: opId, actorId, version, path, payload (see the sketch below).
- Servers validate permissions and consistency (version checks) before persisting.
- Clients apply local-first (optimistic) and reconcile on ack or transform.
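A rough sketch of that envelope and a strict server-side version check (field names are illustrative; in practice a stale op may be transformed rather than rejected):
```typescript
interface Op {
  opId: string
  actorId: string
  version: number // the document version this op was based on
  path: string[] // e.g. ['blocks', blockId, 'position']
  payload: unknown
}

// Server side: reject ops from actors without write access or based on a stale version.
function validateOp(op: Op, currentVersion: number, canWrite: boolean): 'persist' | 'reject' {
  if (!canWrite) return 'reject'
  return op.version === currentVersion ? 'persist' : 'reject'
}
```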
## Conflict handling
- Commutative ops where possible; otherwise a simple priority rule (timestamp plus an actor tie-break; sketched below).
- Path-scoped transforms for list inserts/deletes to prevent positional drift.
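The priority rule itself can be tiny; a sketch assuming causal timestamps that are already comparable:
```typescript
interface StampedOp {
  timestamp: number // causal/logical timestamp
  actorId: string
}

// Deterministic winner for non-commutative conflicts: later timestamp wins,
// with actorId as a stable tie-break so every replica agrees on the outcome.
function wins(a: StampedOp, b: StampedOp): boolean {
  if (a.timestamp !== b.timestamp) return a.timestamp > b.timestamp
  return a.actorId > b.actorId
}
```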
## Latency compensation
- Local optimistic apply → render immediately.
- On ack mismatch, transform local queue and rebase.
- Visual hints for pending vs. confirmed states.
## Scale and sharding
- Rooms keyed by workspace + resource; sticky routing ensures op ordering.
- Horizontal gateway workers; state service partitions by workspace.
- Backpressure and fanout limits on large rooms.
## Security model
- Auth tokens scoped to workspace and resources; serverside permission checks per op.
- Rate limits per actor and per room; anomaly detection for spammy clients.
## Benchmarks (placeholder)
| Metric | Result (p50) | Result (p95) |
| ------------------------------ | -----------: | -----------: |
| Round-trip op latency | 60ms | 140ms |
| Broadcast fanout (100 users) | 8ms | 22ms |
| Reconnect time | 120ms | 280ms |
We'll publish a full methodology and open telemetry traces as we finalize numbers.
## Roadmap
- Presence enrichments (inline comments, threads)
- Partial-document subscriptions for massive canvases
- Time-travel and per-block history
— Team Sim

View File

@@ -8,8 +8,8 @@ authors:
- emir
readingTime: 9
tags: [AI Agents, Workflow Automation, OpenAI AgentKit, n8n, Sim, MCP]
ogImage: /blog/openai-vs-n8n-vs-sim/workflow.png
canonical: https://sim.ai/blog/openai-vs-n8n-vs-sim
ogImage: /studio/openai-vs-n8n-vs-sim/workflow.png
canonical: https://sim.ai/studio/openai-vs-n8n-vs-sim
draft: false
---
@@ -19,7 +19,7 @@ When building AI agent workflows, developers often evaluate multiple platforms t
OpenAI AgentKit is a set of building blocks designed to help developers take AI agents from prototype to production. Built on top of the OpenAI Responses API, it provides a structured approach to building and deploying intelligent agents.
![OpenAI AgentKit workflow interface](/blog/openai-vs-n8n-vs-sim/openai.png)
![OpenAI AgentKit workflow interface](/studio/openai-vs-n8n-vs-sim/openai.png)
### Core Features
@@ -31,7 +31,7 @@ AgentKit provides a visual canvas where developers can design and build agents.
ChatKit enables developers to embed chat interfaces to run workflows directly within their applications. It includes custom widgets that you can create and integrate, with the ability to preview interfaces right in the workflow builder before deployment.
![OpenAI AgentKit custom widgets interface](/blog/openai-vs-n8n-vs-sim/widgets.png)
![OpenAI AgentKit custom widgets interface](/studio/openai-vs-n8n-vs-sim/widgets.png)
#### Comprehensive Evaluation System
@@ -65,7 +65,7 @@ While AgentKit is powerful for building agents, it has some limitations:
n8n is a workflow automation platform that excels at connecting various services and APIs together. While it started as a general automation tool, n8n has evolved to support AI agent workflows alongside its traditional integration capabilities.
![n8n workflow automation interface](/blog/openai-vs-n8n-vs-sim/n8n.png)
![n8n workflow automation interface](/studio/openai-vs-n8n-vs-sim/n8n.png)
### Core Capabilities
@@ -117,19 +117,19 @@ Sim is a fully open-source platform (Apache 2.0 license) specifically designed f
Sim provides an intuitive drag-and-drop canvas where developers can build complex AI agent workflows visually. The platform supports sophisticated agent architectures, including multi-agent systems, conditional logic, loops, and parallel execution paths. Additionally, Sim's built-in AI Copilot can assist you directly in the editor, helping you build and modify workflows faster with intelligent suggestions and explanations.
![Sim visual workflow builder with AI agent blocks](/blog/openai-vs-n8n-vs-sim/sim.png)
![Sim visual workflow builder with AI agent blocks](/studio/openai-vs-n8n-vs-sim/sim.png)
#### AI Copilot for Workflow Building
Sim includes an intelligent in-editor AI assistant that helps you build and edit workflows faster. Copilot can explain complex concepts, suggest best practices, and even make changes to your workflow when you approve them. Using the @ context menu, you can reference workflows, blocks, knowledge bases, documentation, templates, and execution logs—giving Copilot the full context it needs to provide accurate, relevant assistance. This dramatically accelerates workflow development compared to building from scratch.
![Sim AI Copilot assisting with workflow development](/blog/openai-vs-n8n-vs-sim/copilot.png)
![Sim AI Copilot assisting with workflow development](/studio/openai-vs-n8n-vs-sim/copilot.png)
#### Pre-Built Workflow Templates
Get started quickly with Sim's extensive library of pre-built workflow templates. Browse templates across categories like Marketing, Sales, Finance, Support, and Artificial Intelligence. Each template is a production-ready workflow you can customize for your needs, saving hours of development time. Templates are created by the Sim team and community members, with popularity ratings and integration counts to help you find the right starting point.
![Sim workflow templates gallery](/blog/openai-vs-n8n-vs-sim/templates.png)
![Sim workflow templates gallery](/studio/openai-vs-n8n-vs-sim/templates.png)
#### 80+ Built-in Integrations
@@ -155,7 +155,7 @@ Sim's native knowledge base goes far beyond simple document storage. Powered by
Sim provides enterprise-grade logging that captures every detail of workflow execution. Track workflow runs with execution IDs, view block-level logs with precise timing and duration metrics, monitor token usage and costs per execution, and debug failures with detailed error traces and trace spans. The logging system integrates with Copilot—you can reference execution logs directly in your Copilot conversations to understand what happened and troubleshoot issues. This level of observability is essential for production AI agents where understanding behavior and debugging issues quickly is critical.
![Sim execution logs and monitoring dashboard](/blog/openai-vs-n8n-vs-sim/logs.png)
![Sim execution logs and monitoring dashboard](/studio/openai-vs-n8n-vs-sim/logs.png)
#### Custom Integrations via MCP Protocol

View File

@@ -0,0 +1,48 @@
---
slug: series-a
title: 'Sim raises $7M Series A'
description: We're excited to share that Sim has raised a $7M Series A led by Standard Capital to accelerate our vision for agentic workflows and expand the team.
date: 2025-11-08
updated: 2025-11-08
authors:
- sim
readingTime: 4
tags: [Announcement, Funding, Series A, Sim]
ogImage: /studio/series-a/cover.png
ogAlt: 'Sim team photo in front of neon logo'
about: ['Artificial Intelligence', 'Agentic Workflows', 'Startups', 'Funding']
timeRequired: PT4M
canonical: https://sim.ai/studio/series-a
featured: true
draft: false
---
![Sim team photo](/studio/series-a/team.png)
## Why we're excited
Today we're announcing our $7M Series A led by Standard Capital with participation from Perplexity Fund, SV Angel, Y Combinator, and notable angels like Paul Graham, Paul Buchheit, Ali Rowghani, Kaz Nejatian, and many more. This investment helps us double down on our mission: make it simple for teams to build, ship, and scale agentic workflows in production.
## How we got here
We started earlier this year in our apartment in San Francisco. The goal was to rebuild our entire previous company (if you can call it that) from scratch on a visual framework. We figured that if we could at least build an AI sales and marketing operation solely using building blocks on a canvas, then anyone could build anything. Soon after, we'd built the foundation of what would become Sim. We were hell-bent on being open source from day one, and we're proud that we've stuck to that commitment.
## Progress so far
...so far...
## Our vision
We believe the next wave of software is agentic. Teams will compose specialized agents that reason, retrieve, and act—safely and reliably—across their business. Our focus is to provide the infrastructure and UX that make this practical at scale: from prototyping to production, from single-agent flows to complex multi-agent systems. On one end of the spectrum are SDKs and frameworks that are complex and require a lot of code to build and manage; on the other are platforms that are easy to use but severely limited in what you can build. Sim offers a platform that is both easy to use and powerful enough to build complex agentic workflows.
## What's next
We'll invest in building the community around Sim, and we'll be hiring across engineering, product, and design. ...ADD MORE...
## We're hiring
If you're excited about agentic systems and want to help define the future of this space, we'd love to talk. We're hiring across engineering, product, and design.
— Team Sim

View File

@@ -75,6 +75,7 @@ async function scanFrontmatters(): Promise<BlogMeta[]> {
timeRequired: fm.timeRequired,
faq: fm.faq,
draft: fm.draft,
featured: fm.featured ?? false,
})
}
cachedMeta = results.sort(byDateDesc)

View File

@@ -36,6 +36,7 @@ export const BlogFrontmatterSchema = z
.optional(),
canonical: z.string().url(),
draft: z.boolean().default(false),
featured: z.boolean().default(false),
})
.strict()
@@ -58,6 +59,7 @@ export interface BlogMeta {
faq?: { q: string; a: string }[]
canonical: string
draft: boolean
featured: boolean
sourcePath?: string
}

View File

@@ -104,7 +104,7 @@ export function buildBreadcrumbJsonLd(post: BlogMeta) {
'@type': 'BreadcrumbList',
itemListElement: [
{ '@type': 'ListItem', position: 1, name: 'Home', item: 'https://sim.ai' },
{ '@type': 'ListItem', position: 2, name: 'Blog', item: 'https://sim.ai/blog' },
{ '@type': 'ListItem', position: 2, name: 'Sim Studio', item: 'https://sim.ai/studio' },
{ '@type': 'ListItem', position: 3, name: post.title, item: post.canonical },
],
}
@@ -127,8 +127,8 @@ export function buildBlogJsonLd() {
return {
'@context': 'https://schema.org',
'@type': 'Blog',
name: 'Sim Blog',
url: 'https://sim.ai/blog',
name: 'Sim Studio',
url: 'https://sim.ai/studio',
description: 'Announcements, insights, and guides for building AI agent workflows.',
}
}

View File

@@ -1,7 +1,7 @@
/**
* Environment utility functions for consistent environment detection across the application
*/
import { env, getEnv, isTruthy } from './env'
import { env, isTruthy } from './env'
/**
* Is the application running in production mode
@@ -21,9 +21,7 @@ export const isTest = env.NODE_ENV === 'test'
/**
* Is this the hosted version of the application
*/
export const isHosted =
getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.sim.ai' ||
getEnv('NEXT_PUBLIC_APP_URL') === 'https://www.staging.sim.ai'
export const isHosted = true
/**
* Is billing enforcement enabled

View File

@@ -86,7 +86,11 @@ export async function executeInE2B(req: E2BExecutionRequest): Promise<E2BExecuti
} catch {
result = jsonPart
}
cleanedStdout = lines.filter((l) => !l.startsWith(prefix)).join('\n')
const filteredLines = lines.filter((l) => !l.startsWith(prefix))
if (filteredLines.length > 0 && filteredLines[filteredLines.length - 1] === '') {
filteredLines.pop()
}
cleanedStdout = filteredLines.join('\n')
}
return { result, stdout: cleanedStdout, sandboxId }

View File

@@ -502,3 +502,103 @@ export function generateRequestId(): string {
* No-operation function for use as default callback
*/
export const noop = () => {}
/**
* Options for performing an optimistic update with automatic rollback on error
*/
export interface OptimisticUpdateOptions<T> {
/**
* Function that returns the current state value (for rollback purposes)
*/
getCurrentState: () => T
/**
* Function that performs the optimistic update to the UI state
*/
optimisticUpdate: () => void
/**
* Async function that performs the actual API call
*/
apiCall: () => Promise<void>
/**
* Function that rolls back the state to the original value
* @param originalValue - The value returned by getCurrentState before the update
*/
rollback: (originalValue: T) => void
/**
* Optional error message to log if the operation fails
*/
errorMessage?: string
/**
* Optional callback to execute on error (e.g., show toast notification)
*/
onError?: (error: Error, originalValue: T) => void
/**
* Optional callback that always runs regardless of success or error (e.g., to clear loading states)
*/
onComplete?: () => void
}
/**
* Performs an optimistic update with automatic rollback on error.
* This utility standardizes the pattern of:
* 1. Save current state
* 2. Update UI optimistically
* 3. Make API call
* 4. Rollback on error
*
* @example
* ```typescript
* await withOptimisticUpdate({
* getCurrentState: () => get().folders[id],
* optimisticUpdate: () => set(state => ({
* folders: { ...state.folders, [id]: { ...folder, name: newName } }
* })),
* apiCall: async () => {
* await fetch(`/api/folders/${id}`, {
* method: 'PUT',
* body: JSON.stringify({ name: newName })
* })
* },
* rollback: (originalFolder) => set(state => ({
* folders: { ...state.folders, [id]: originalFolder }
* })),
* errorMessage: 'Failed to rename folder',
* onError: (error) => toast.error('Could not rename folder')
* })
* ```
*/
export async function withOptimisticUpdate<T>(options: OptimisticUpdateOptions<T>): Promise<void> {
const {
getCurrentState,
optimisticUpdate,
apiCall,
rollback,
errorMessage,
onError,
onComplete,
} = options
const originalValue = getCurrentState()
optimisticUpdate()
try {
await apiCall()
} catch (error) {
rollback(originalValue)
if (errorMessage) {
logger.error(errorMessage, { error })
}
if (onError && error instanceof Error) {
onError(error, originalValue)
}
throw error
} finally {
if (onComplete) {
onComplete()
}
}
}

View File

@@ -102,11 +102,6 @@ const nextConfig: NextConfig = {
'@t3-oss/env-nextjs',
'@t3-oss/env-core',
'@sim/db',
'next-mdx-remote',
'gray-matter',
'rehype-autolink-headings',
'rehype-slug',
'remark-gfm',
],
async headers() {
return [
@@ -214,23 +209,30 @@ const nextConfig: NextConfig = {
async redirects() {
const redirects = []
// Redirect /building to /blog (legacy URL support)
redirects.push({
source: '/building/:path*',
destination: '/blog/:path*',
permanent: true,
})
// Redirect /building and /blog to /studio (legacy URL support)
redirects.push(
{
source: '/building/:path*',
destination: 'https://sim.ai/studio/:path*',
permanent: true,
},
{
source: '/blog/:path*',
destination: 'https://sim.ai/studio/:path*',
permanent: true,
}
)
// Move root feeds to blog namespace
// Move root feeds to studio namespace
redirects.push(
{
source: '/rss.xml',
destination: '/blog/rss.xml',
destination: '/studio/rss.xml',
permanent: true,
},
{
source: '/sitemap-images.xml',
destination: '/blog/sitemap-images.xml',
destination: '/studio/sitemap-images.xml',
permanent: true,
}
)

View File

Binary image files changed (diff previews omitted; dimensions not recorded):

Changed: 2.0 MiB → 2.0 MiB
Added: 2.4 MiB
Added: 114 KiB
Added: 90 KiB
Added: 125 KiB
Added: 217 KiB
Added: 153 KiB
Added: 148 KiB
Changed: 487 KiB → 487 KiB
Changed: 234 KiB → 234 KiB
Changed: 657 KiB → 657 KiB
Changed: 148 KiB → 148 KiB
Changed: 301 KiB → 301 KiB
Changed: 338 KiB → 338 KiB
Changed: 863 KiB → 863 KiB
Changed: 325 KiB → 325 KiB
Added: 90 KiB
Added: 10 MiB

View File

@@ -1,6 +1,7 @@
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console/logger'
import { withOptimisticUpdate } from '@/lib/utils'
import type { CustomToolsState, CustomToolsStore } from './types'
const logger = createLogger('CustomToolsStore')
@@ -136,84 +137,108 @@ export const useCustomToolsStore = create<CustomToolsStore>()(
},
updateTool: async (workspaceId: string, id: string, updates) => {
set({ isLoading: true, error: null })
try {
const tool = get().tools.find((t) => t.id === id)
if (!tool) {
throw new Error('Tool not found')
}
logger.info(`Updating custom tool: ${id} in workspace ${workspaceId}`)
const response = await fetch(API_ENDPOINT, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
tools: [
{
id,
title: updates.title ?? tool.title,
schema: updates.schema ?? tool.schema,
code: updates.code ?? tool.code,
},
],
workspaceId,
}),
})
const data = await response.json()
if (!response.ok) {
throw new ApiError(data.error || 'Failed to update tool', response.status)
}
if (!data.data || !Array.isArray(data.data)) {
throw new Error('Invalid API response: missing tools data')
}
set({ tools: data.data, isLoading: false })
logger.info(`Updated custom tool: ${id}`)
} catch (error) {
logger.error('Error updating custom tool:', error)
set({ isLoading: false })
throw error
const tool = get().tools.find((t) => t.id === id)
if (!tool) {
throw new Error('Tool not found')
}
await withOptimisticUpdate({
getCurrentState: () => get().tools,
optimisticUpdate: () => {
set((state) => ({
tools: state.tools.map((t) =>
t.id === id
? {
...t,
title: updates.title ?? t.title,
schema: updates.schema ?? t.schema,
code: updates.code ?? t.code,
}
: t
),
isLoading: true,
error: null,
}))
},
apiCall: async () => {
logger.info(`Updating custom tool: ${id} in workspace ${workspaceId}`)
const response = await fetch(API_ENDPOINT, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
tools: [
{
id,
title: updates.title ?? tool.title,
schema: updates.schema ?? tool.schema,
code: updates.code ?? tool.code,
},
],
workspaceId,
}),
})
const data = await response.json()
if (!response.ok) {
throw new ApiError(data.error || 'Failed to update tool', response.status)
}
if (!data.data || !Array.isArray(data.data)) {
throw new Error('Invalid API response: missing tools data')
}
set({ tools: data.data })
logger.info(`Updated custom tool: ${id}`)
},
rollback: (originalTools) => {
set({ tools: originalTools })
},
onComplete: () => {
set({ isLoading: false })
},
errorMessage: 'Error updating custom tool',
})
},
deleteTool: async (workspaceId: string | null, id: string) => {
set({ isLoading: true, error: null })
await withOptimisticUpdate({
getCurrentState: () => get().tools,
optimisticUpdate: () => {
set((state) => ({
tools: state.tools.filter((tool) => tool.id !== id),
isLoading: true,
error: null,
}))
},
apiCall: async () => {
logger.info(`Deleting custom tool: ${id}`)
try {
logger.info(`Deleting custom tool: ${id}`)
const url = workspaceId
? `${API_ENDPOINT}?id=${id}&workspaceId=${workspaceId}`
: `${API_ENDPOINT}?id=${id}`
// Build URL with optional workspaceId (for user-scoped tools)
const url = workspaceId
? `${API_ENDPOINT}?id=${id}&workspaceId=${workspaceId}`
: `${API_ENDPOINT}?id=${id}`
const response = await fetch(url, {
method: 'DELETE',
})
const response = await fetch(url, {
method: 'DELETE',
})
const data = await response.json()
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to delete tool')
}
if (!response.ok) {
throw new Error(data.error || 'Failed to delete tool')
}
set((state) => ({
tools: state.tools.filter((tool) => tool.id !== id),
isLoading: false,
}))
logger.info(`Deleted custom tool: ${id}`)
} catch (error) {
logger.error('Error deleting custom tool:', error)
set({ isLoading: false })
throw error
}
logger.info(`Deleted custom tool: ${id}`)
},
rollback: (originalTools) => {
set({ tools: originalTools })
},
onComplete: () => {
set({ isLoading: false })
},
errorMessage: 'Error deleting custom tool',
})
},
getTool: (id: string) => {

View File

@@ -1,6 +1,7 @@
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console/logger'
import { withOptimisticUpdate } from '@/lib/utils'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('FoldersStore')
@@ -282,62 +283,103 @@ export const useFolderStore = create<FolderState>()(
},
updateFolderAPI: async (id, updates) => {
const response = await fetch(`/api/folders/${id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates),
const originalFolder = get().folders[id]
if (!originalFolder) {
throw new Error('Folder not found')
}
let updatedFolder: WorkflowFolder | null = null
await withOptimisticUpdate({
getCurrentState: () => originalFolder,
optimisticUpdate: () => {
get().updateFolder(id, { ...updates, updatedAt: new Date() })
},
apiCall: async () => {
const response = await fetch(`/api/folders/${id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates),
})
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to update folder')
}
const { folder } = await response.json()
const processedFolder = {
...folder,
createdAt: new Date(folder.createdAt),
updatedAt: new Date(folder.updatedAt),
}
get().updateFolder(id, processedFolder)
updatedFolder = processedFolder
},
rollback: (original) => {
get().updateFolder(id, original)
},
errorMessage: 'Failed to update folder',
})
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to update folder')
}
const { folder } = await response.json()
const processedFolder = {
...folder,
createdAt: new Date(folder.createdAt),
updatedAt: new Date(folder.updatedAt),
}
get().updateFolder(id, processedFolder)
return processedFolder
return updatedFolder || { ...originalFolder, ...updates }
},
deleteFolder: async (id: string, workspaceId: string) => {
const response = await fetch(`/api/folders/${id}`, { method: 'DELETE' })
const getAllSubfolderIds = (parentId: string): string[] => {
const folders = get().folders
const childIds = Object.keys(folders).filter(
(folderId) => folders[folderId].parentId === parentId
)
const allIds = [...childIds]
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to delete folder')
childIds.forEach((childId) => {
allIds.push(...getAllSubfolderIds(childId))
})
return allIds
}
const responseData = await response.json()
const deletedFolderIds = [id, ...getAllSubfolderIds(id)]
// Remove the folder from local state
get().removeFolder(id)
await withOptimisticUpdate({
getCurrentState: () => ({
folders: { ...get().folders },
expandedFolders: new Set(get().expandedFolders),
}),
optimisticUpdate: () => {
deletedFolderIds.forEach((folderId) => {
get().removeFolder(folderId)
})
// Remove from expanded state
set((state) => {
const newExpanded = new Set(state.expandedFolders)
newExpanded.delete(id)
return { expandedFolders: newExpanded }
set((state) => {
const newExpanded = new Set(state.expandedFolders)
deletedFolderIds.forEach((folderId) => newExpanded.delete(folderId))
return { expandedFolders: newExpanded }
})
},
apiCall: async () => {
const response = await fetch(`/api/folders/${id}`, { method: 'DELETE' })
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to delete folder')
}
const responseData = await response.json()
logger.info(
`Deleted ${responseData.deletedItems.workflows} workflow(s) and ${responseData.deletedItems.folders} folder(s)`
)
const workflowRegistry = useWorkflowRegistry.getState()
await workflowRegistry.loadWorkflows(workspaceId)
},
rollback: (originalState) => {
set({ folders: originalState.folders, expandedFolders: originalState.expandedFolders })
},
errorMessage: 'Failed to delete folder',
})
},
isWorkflowInDeletedSubfolder: (workflow: Workflow, deletedFolderId: string) => {
@@ -372,6 +414,5 @@ export const useFolderStore = create<FolderState>()(
)
)
// Selector hook for checking if a workflow is selected (avoids get() calls)
export const useIsWorkflowSelected = (workflowId: string) =>
useFolderStore((state) => state.selectedWorkflows.has(workflowId))
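The added `useIsWorkflowSelected` selector lets a row component subscribe to a single membership check rather than the whole `selectedWorkflows` set. Hypothetical usage (component context is illustrative, not from this diff):

    // Inside a sidebar row component:
    const isSelected = useIsWorkflowSelected(workflow.id)
    // The component re-renders only when this id's membership flips,
    // not on every mutation of the selectedWorkflows Set.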

View File

@@ -1,5 +1,6 @@
import { create } from 'zustand'
import { createLogger } from '@/lib/logs/console/logger'
import { withOptimisticUpdate } from '@/lib/utils'
import { API_ENDPOINTS } from '@/stores/constants'
import type {
CachedWorkspaceEnvData,
@@ -48,55 +49,53 @@ export const useEnvironmentStore = create<EnvironmentStore>()((set, get) => ({
},
saveEnvironmentVariables: async (variables: Record<string, string>) => {
const transformedVariables = Object.entries(variables).reduce(
(acc, [key, value]) => ({
...acc,
[key]: { key, value },
}),
{}
)
await withOptimisticUpdate({
getCurrentState: () => get().variables,
optimisticUpdate: () => {
set({ variables: transformedVariables, isLoading: true, error: null })
},
apiCall: async () => {
const response = await fetch(API_ENDPOINTS.ENVIRONMENT, {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
variables: Object.entries(transformedVariables).reduce(
(acc, [key, value]) => ({
...acc,
[key]: (value as EnvironmentVariable).value,
}),
{}
),
}),
})
if (!response.ok) {
throw new Error(`Failed to save environment variables: ${response.statusText}`)
}
get().clearWorkspaceEnvCache()
},
rollback: (originalVariables) => {
set({ variables: originalVariables })
},
onComplete: () => {
set({ isLoading: false })
},
errorMessage: 'Error saving environment variables',
})
},
loadWorkspaceEnvironment: async (workspaceId: string) => {
// Check cache first
const cached = get().workspaceEnvCache.get(workspaceId)
if (cached) {
return {
@@ -121,7 +120,6 @@ export const useEnvironmentStore = create<EnvironmentStore>()((set, get) => ({
conflicts: string[]
}
// Cache the result
const cache = new Map(get().workspaceEnvCache)
cache.set(workspaceId, {
...envData,
@@ -150,7 +148,6 @@ export const useEnvironmentStore = create<EnvironmentStore>()((set, get) => ({
}
set({ isLoading: false })
// Invalidate cache for this workspace
get().clearWorkspaceEnvCache(workspaceId)
} catch (error) {
logger.error('Error updating workspace environment:', { error })
@@ -171,7 +168,6 @@ export const useEnvironmentStore = create<EnvironmentStore>()((set, get) => ({
}
set({ isLoading: false })
// Invalidate cache for this workspace
get().clearWorkspaceEnvCache(workspaceId)
} catch (error) {
logger.error('Error removing workspace environment keys:', { error })
@@ -189,7 +185,6 @@ export const useEnvironmentStore = create<EnvironmentStore>()((set, get) => ({
cache.delete(workspaceId)
set({ workspaceEnvCache: cache })
} else {
// Clear all caches
set({ workspaceEnvCache: new Map() })
}
},
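For reference, the save path above round-trips between two shapes: the store keys each variable to a { key, value } object, while the POST body flattens values back to plain strings. A standalone sketch of the two reduces (sample data is made up):

    const input = { API_KEY: 'abc', REGION: 'us-east-1' }
    // Store shape (what the optimistic update writes):
    const stored = Object.entries(input).reduce(
      (acc, [key, value]) => ({ ...acc, [key]: { key, value } }),
      {} as Record<string, { key: string; value: string }>
    )
    // Request shape (what apiCall sends):
    const payload = Object.entries(stored).reduce(
      (acc, [key, v]) => ({ ...acc, [key]: v.value }),
      {} as Record<string, string>
    )
    // payload deep-equals input again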

View File

@@ -2,6 +2,7 @@ import { create } from 'zustand'
import { devtools, persist } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console/logger'
import { syncThemeToNextThemes } from '@/lib/theme-sync'
import { withOptimisticUpdate } from '@/lib/utils'
import type { General, GeneralStore, UserSettings } from '@/stores/settings/general/types'
const logger = createLogger('GeneralStore')
@@ -41,34 +42,28 @@ export const useGeneralStore = create<GeneralStore>()(
isSuperUserModeLoading: false,
}
// Optimistic update helper
const updateSettingOptimistic = async <K extends keyof UserSettings>(
key: K,
value: UserSettings[K],
loadingKey: keyof General,
stateKey: keyof General
) => {
// Prevent multiple simultaneous updates
if ((get() as any)[loadingKey]) return
await withOptimisticUpdate({
getCurrentState: () => (get() as any)[stateKey],
optimisticUpdate: () => set({ [stateKey]: value, [loadingKey]: true } as any),
apiCall: async () => {
await get().updateSetting(key, value)
},
rollback: (originalValue) => set({ [stateKey]: originalValue } as any),
onComplete: () => set({ [loadingKey]: false } as any),
errorMessage: `Failed to update ${String(key)}, rolled back`,
})
}
return {
...store,
// Basic Actions with optimistic updates
toggleAutoConnect: async () => {
if (get().isAutoConnectLoading) return
const newValue = !get().isAutoConnectEnabled
@@ -138,25 +133,22 @@ export const useGeneralStore = create<GeneralStore>()(
setTheme: async (theme) => {
if (get().isThemeLoading) return
await withOptimisticUpdate({
getCurrentState: () => get().theme,
optimisticUpdate: () => {
set({ theme, isThemeLoading: true })
syncThemeToNextThemes(theme)
},
apiCall: async () => {
await get().updateSetting('theme', theme)
},
rollback: (originalTheme) => {
set({ theme: originalTheme })
syncThemeToNextThemes(originalTheme)
},
onComplete: () => set({ isThemeLoading: false }),
errorMessage: 'Failed to sync theme to database',
})
},
setTelemetryEnabled: async (enabled) => {
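With the shared helper in place, each boolean setting reduces to a guard plus one call; `toggleAutoConnect` (truncated above) presumably delegates like this — the 'autoConnect' settings key is an assumption inferred from the state names, not shown in the diff:

    toggleAutoConnect: async () => {
      if (get().isAutoConnectLoading) return
      const newValue = !get().isAutoConnectEnabled
      // 'autoConnect' as the UserSettings key is assumed, not shown in this diff.
      await updateSettingOptimistic(
        'autoConnect',
        newValue,
        'isAutoConnectLoading',
        'isAutoConnectEnabled'
      )
    },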

View File

@@ -2,6 +2,7 @@ import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console/logger'
import { generateCreativeWorkflowName } from '@/lib/naming'
import { withOptimisticUpdate } from '@/lib/utils'
import { buildDefaultWorkflowArtifacts } from '@/lib/workflows/defaults'
import { API_ENDPOINTS } from '@/stores/constants'
import { useVariablesStore } from '@/stores/panel/variables/store'
@@ -753,100 +754,120 @@ export const useWorkflowRegistry = create<WorkflowRegistry>()(
return id
},
// Delete workflow and clean up associated storage
      removeWorkflow: async (id: string) => {
        const { workflows, activeWorkflowId } = get()
        const workflowToDelete = workflows[id]
        if (!workflowToDelete) {
          logger.warn(`Attempted to delete non-existent workflow: ${id}`)
          return
        }
        const isDeletingActiveWorkflow = activeWorkflowId === id
        await withOptimisticUpdate({
          getCurrentState: () => ({
            workflows: { ...get().workflows },
            activeWorkflowId: get().activeWorkflowId,
            subBlockValues: { ...useSubBlockStore.getState().workflowValues },
            workflowStoreState: isDeletingActiveWorkflow
              ? {
                  blocks: { ...useWorkflowStore.getState().blocks },
                  edges: [...useWorkflowStore.getState().edges],
                  loops: { ...useWorkflowStore.getState().loops },
                  parallels: { ...useWorkflowStore.getState().parallels },
                  isDeployed: useWorkflowStore.getState().isDeployed,
                  deployedAt: useWorkflowStore.getState().deployedAt,
                  lastSaved: useWorkflowStore.getState().lastSaved,
                }
              : null,
          }),
          optimisticUpdate: () => {
            const newWorkflows = { ...get().workflows }
            delete newWorkflows[id]
            // Clean up subblock values for this workflow
            const currentSubBlockValues = useSubBlockStore.getState().workflowValues
            const newWorkflowValues = { ...currentSubBlockValues }
            delete newWorkflowValues[id]
            useSubBlockStore.setState({ workflowValues: newWorkflowValues })
            // If deleting the active workflow, clear the active workflow ID immediately.
            // Don't automatically switch to another workflow to prevent race conditions.
            let newActiveWorkflowId = get().activeWorkflowId
            if (isDeletingActiveWorkflow) {
              newActiveWorkflowId = null
              // Clear workflow store state immediately when deleting the active workflow
              useWorkflowStore.setState({
                blocks: {},
                edges: [],
                loops: {},
                parallels: {},
                isDeployed: false,
                deployedAt: undefined,
                lastSaved: Date.now(),
              })
              logger.info(
                `Cleared active workflow ${id} - user will need to manually select another workflow`
              )
            }
            set({
              workflows: newWorkflows,
              activeWorkflowId: newActiveWorkflowId,
              isLoading: true,
              error: null,
            })
            logger.info(`Removed workflow ${id} from local state (optimistic)`)
          },
apiCall: async () => {
const response = await fetch(`/api/workflows/${id}`, {
method: 'DELETE',
})
if (!response.ok) {
const error = await response.json().catch(() => ({ error: 'Unknown error' }))
throw new Error(error.error || 'Failed to delete workflow')
}
logger.info(`Successfully deleted workflow ${id} from database`)
fetch(API_ENDPOINTS.SCHEDULE, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify({
workflowId: id,
state: {
blocks: {},
edges: [],
loops: {},
},
}),
}).catch((error) => {
logger.error(`Error cancelling schedule for deleted workflow ${id}:`, error)
})
},
rollback: (originalState) => {
set({
workflows: originalState.workflows,
activeWorkflowId: originalState.activeWorkflowId,
})
useSubBlockStore.setState({ workflowValues: originalState.subBlockValues })
if (originalState.workflowStoreState) {
useWorkflowStore.setState(originalState.workflowStoreState)
logger.info(`Restored workflow store state for workflow ${id}`)
}
logger.info(`Rolled back deletion of workflow ${id}`)
},
onComplete: () => {
set({ isLoading: false })
},
errorMessage: `Failed to delete workflow ${id}`,
})
},
// Update workflow metadata
updateWorkflow: async (id: string, metadata: Partial<WorkflowMetadata>) => {
const { workflows } = get()
const workflow = workflows[id]
@@ -855,71 +876,70 @@ export const useWorkflowRegistry = create<WorkflowRegistry>()(
return
}
        await withOptimisticUpdate({
          getCurrentState: () => workflow,
          optimisticUpdate: () => {
            set((state) => ({
              workflows: {
                ...state.workflows,
                [id]: {
                  ...workflow,
                  ...metadata,
                  lastModified: new Date(),
                  createdAt: workflow.createdAt, // Preserve creation date
                },
              },
              error: null,
            }))
          },
apiCall: async () => {
const response = await fetch(`/api/workflows/${id}`, {
method: 'PUT',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(metadata),
})
if (!response.ok) {
const error = await response.json()
throw new Error(error.error || 'Failed to update workflow')
}
const { workflow: updatedWorkflow } = await response.json()
logger.info(`Successfully updated workflow ${id} metadata`, metadata)
set((state) => ({
workflows: {
...state.workflows,
[id]: {
...state.workflows[id],
name: updatedWorkflow.name,
description: updatedWorkflow.description,
color: updatedWorkflow.color,
folderId: updatedWorkflow.folderId,
lastModified: new Date(updatedWorkflow.updatedAt),
createdAt: updatedWorkflow.createdAt
? new Date(updatedWorkflow.createdAt)
: state.workflows[id].createdAt,
},
},
}))
},
rollback: (originalWorkflow) => {
set((state) => ({
workflows: {
...state.workflows,
[id]: originalWorkflow, // Revert to original state
},
error: `Failed to update workflow: ${metadata.name ? 'name' : 'metadata'}`,
}))
},
errorMessage: `Failed to update workflow ${id} metadata`,
})
},
logout: () => {
logger.info('Logging out - clearing all workflow data')
// Clear all state
resetWorkflowStores()
set({

View File

@@ -0,0 +1,129 @@
# Enable the copilot service
copilot:
enabled: true
# Server configuration
server:
image:
repository: simstudioai/copilot
tag: latest
pullPolicy: Always
replicaCount: 2
# Node scheduling (OPTIONAL)
# By default, copilot runs on the same nodes as the main Sim platform
nodeSelector: {}
# nodeSelector:
# workload-type: copilot
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
memory: "1Gi"
cpu: "500m"
# Required secrets (set via values or provide your own secret)
env:
PORT: "8080"
SERVICE_NAME: "copilot"
ENVIRONMENT: "production"
AGENT_API_DB_ENCRYPTION_KEY: "" # openssl rand -hex 32
INTERNAL_API_SECRET: "" # reuse Sim INTERNAL_API_SECRET
LICENSE_KEY: "" # Provided by Sim team
OPENAI_API_KEY_1: "" # At least one provider key required
ANTHROPIC_API_KEY_1: "" # Optional secondary provider
SIM_BASE_URL: "https://sim.example.com" # Base URL for Sim deployment
SIM_AGENT_API_KEY: "" # Must match SIM-side COPILOT_API_KEY
REDIS_URL: "redis://default:password@redis:6379"
# Optional configuration
LOG_LEVEL: "info"
CORS_ALLOWED_ORIGINS: "https://sim.example.com"
OTEL_EXPORTER_OTLP_ENDPOINT: ""
# Create a Secret from the values above. Set create=false to reference an existing secret instead.
secret:
create: true
name: ""
annotations: {}
extraEnv: []
extraEnvFrom: []
service:
type: ClusterIP
port: 8080
targetPort: 8080
# Internal PostgreSQL database (disable to use an external database)
postgresql:
enabled: true
image:
repository: postgres
tag: 16-alpine
pullPolicy: IfNotPresent
auth:
username: copilot
password: "" # REQUIRED - set via --set copilot.postgresql.auth.password
database: copilot
nodeSelector: {}
# nodeSelector:
# workload-type: copilot
resources:
limits:
memory: "1Gi"
cpu: "500m"
requests:
memory: "512Mi"
cpu: "250m"
persistence:
enabled: true
size: 10Gi
# External database configuration (only used when postgresql.enabled=false)
database:
existingSecretName: ""
secretKey: DATABASE_URL
url: ""
# Migration job
migrations:
enabled: true
resources:
limits:
memory: "512Mi"
cpu: "500m"
requests:
memory: "256Mi"
cpu: "100m"
# Optional: Configure ingress to expose copilot service
# Uncomment if you need external access to copilot
# ingress:
# enabled: true
# className: nginx
# annotations:
# cert-manager.io/cluster-issuer: letsencrypt-prod
# nginx.ingress.kubernetes.io/force-ssl-redirect: "true"
# copilot:
# host: copilot.yourdomain.com
# paths:
# - path: /
# pathType: Prefix
# tls:
# enabled: true
# secretName: copilot-tls-secret
# If using private Docker Hub repository
# global:
# imagePullSecrets:
# - name: dockerhub-secret
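Assuming the chart directory is ./helm/sim and this example file is saved as copilot-values.yaml (both names illustrative), an install satisfying the required secrets could look like:

    helm upgrade --install sim ./helm/sim \
      --namespace sim --create-namespace \
      -f copilot-values.yaml \
      --set copilot.postgresql.auth.password="$(openssl rand -hex 16)"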

View File

@@ -300,4 +300,69 @@ Affinity
affinity:
{{- toYaml .affinity | nindent 2 }}
{{- end }}
{{- end }}
{{/*
Copilot environment secret name
*/}}
{{- define "sim.copilot.envSecretName" -}}
{{- if and .Values.copilot.server.secret.name (ne .Values.copilot.server.secret.name "") -}}
{{- .Values.copilot.server.secret.name -}}
{{- else -}}
{{- printf "%s-copilot-env" (include "sim.fullname" .) -}}
{{- end -}}
{{- end }}
{{/*
Copilot database secret name
*/}}
{{- define "sim.copilot.databaseSecretName" -}}
{{- if .Values.copilot.postgresql.enabled -}}
{{- printf "%s-copilot-postgresql-secret" (include "sim.fullname" .) -}}
{{- else if and .Values.copilot.database.existingSecretName (ne .Values.copilot.database.existingSecretName "") -}}
{{- .Values.copilot.database.existingSecretName -}}
{{- else -}}
{{- printf "%s-copilot-database-secret" (include "sim.fullname" .) -}}
{{- end -}}
{{- end }}
{{/*
Copilot database secret key
*/}}
{{- define "sim.copilot.databaseSecretKey" -}}
{{- default "DATABASE_URL" .Values.copilot.database.secretKey -}}
{{- end }}
{{/*
Validate Copilot configuration
*/}}
{{- define "sim.copilot.validate" -}}
{{- if .Values.copilot.enabled -}}
{{- if and (not .Values.copilot.server.secret.create) (or (not .Values.copilot.server.secret.name) (eq .Values.copilot.server.secret.name "")) -}}
{{- fail "copilot.server.secret.name must be provided when copilot.server.secret.create=false" -}}
{{- end -}}
{{- if .Values.copilot.server.secret.create -}}
{{- $env := .Values.copilot.server.env -}}
{{- $required := list "AGENT_API_DB_ENCRYPTION_KEY" "INTERNAL_API_SECRET" "LICENSE_KEY" "SIM_BASE_URL" "SIM_AGENT_API_KEY" "REDIS_URL" -}}
{{- range $key := $required -}}
{{- if not (and $env (index $env $key) (ne (index $env $key) "")) -}}
{{- fail (printf "copilot.server.env.%s is required when copilot is enabled" $key) -}}
{{- end -}}
{{- end -}}
{{- $hasOpenAI := and $env (ne (default "" (index $env "OPENAI_API_KEY_1")) "") -}}
{{- $hasAnthropic := and $env (ne (default "" (index $env "ANTHROPIC_API_KEY_1")) "") -}}
{{- if not (or $hasOpenAI $hasAnthropic) -}}
{{- fail "Set at least one of copilot.server.env.OPENAI_API_KEY_1 or copilot.server.env.ANTHROPIC_API_KEY_1" -}}
{{- end -}}
{{- end -}}
{{- if .Values.copilot.postgresql.enabled -}}
{{- if or (not .Values.copilot.postgresql.auth.password) (eq .Values.copilot.postgresql.auth.password "") -}}
{{- fail "copilot.postgresql.auth.password is required when copilot.postgresql.enabled=true" -}}
{{- end -}}
{{- else -}}
{{- if and (or (not .Values.copilot.database.existingSecretName) (eq .Values.copilot.database.existingSecretName "")) (or (not .Values.copilot.database.url) (eq .Values.copilot.database.url "")) -}}
{{- fail "Provide copilot.database.existingSecretName or copilot.database.url when copilot.postgresql.enabled=false" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end }}
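Since the copilot templates include `sim.copilot.validate` before rendering, a release with copilot enabled but missing required keys fails at template time rather than at pod startup. For example (error wording approximated):

    helm template sim ./helm/sim --set copilot.enabled=true
    # Error: execution error ...: copilot.server.env.AGENT_API_DB_ENCRYPTION_KEY is required when copilot is enabled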

View File

@@ -0,0 +1,109 @@
{{- if .Values.copilot.enabled }}
{{- include "sim.copilot.validate" . }}
apiVersion: v1
kind: Service
metadata:
name: {{ include "sim.fullname" . }}-copilot
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot
spec:
type: {{ .Values.copilot.server.service.type }}
ports:
- port: {{ .Values.copilot.server.service.port }}
targetPort: {{ .Values.copilot.server.service.targetPort }}
protocol: TCP
name: http
selector:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot
---
apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "sim.fullname" . }}-copilot
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot
spec:
replicas: {{ .Values.copilot.server.replicaCount }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot
template:
metadata:
annotations:
{{- with .Values.podAnnotations }}
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.copilot.server.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.copilot.server.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: copilot
image: {{ include "sim.image" (dict "context" . "image" .Values.copilot.server.image) }}
imagePullPolicy: {{ .Values.copilot.server.image.pullPolicy }}
ports:
- name: http
containerPort: {{ .Values.copilot.server.service.targetPort }}
protocol: TCP
envFrom:
- secretRef:
name: {{ include "sim.copilot.envSecretName" . }}
- secretRef:
name: {{ include "sim.copilot.databaseSecretName" . }}
{{- with .Values.copilot.server.extraEnvFrom }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.server.extraEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.copilot.server.livenessProbe }}
livenessProbe:
{{- toYaml .Values.copilot.server.livenessProbe | nindent 12 }}
{{- end }}
{{- if .Values.copilot.server.readinessProbe }}
readinessProbe:
{{- toYaml .Values.copilot.server.readinessProbe | nindent 12 }}
{{- end }}
{{- with .Values.copilot.server.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.server.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
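To keep secrets out of Helm values entirely, the Deployment's envFrom can reference a pre-created Secret instead of a chart-managed one. A hypothetical setup (secret name and all values are placeholders):

    kubectl create secret generic my-copilot-env --namespace sim \
      --from-literal=AGENT_API_DB_ENCRYPTION_KEY="$(openssl rand -hex 32)" \
      --from-literal=INTERNAL_API_SECRET="..." \
      --from-literal=LICENSE_KEY="..." \
      --from-literal=SIM_BASE_URL="https://sim.example.com" \
      --from-literal=SIM_AGENT_API_KEY="..." \
      --from-literal=REDIS_URL="redis://default:password@redis:6379"

    # then in values:
    # copilot:
    #   server:
    #     secret:
    #       create: false
    #       name: my-copilot-env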

View File

@@ -21,6 +21,9 @@ spec:
{{- if .Values.realtime.enabled }}
- {{ .Values.ingress.realtime.host }}
{{- end }}
{{- if and .Values.copilot.enabled .Values.ingress.copilot }}
- {{ .Values.ingress.copilot.host }}
{{- end }}
secretName: {{ .Values.ingress.tls.secretName }}
{{- end }}
rules:
@@ -52,4 +55,19 @@ spec:
number: {{ $.Values.realtime.service.port }}
{{- end }}
{{- end }}
{{- if and .Values.copilot.enabled .Values.ingress.copilot }}
# Copilot service ingress rule
- host: {{ .Values.ingress.copilot.host }}
http:
paths:
{{- range .Values.ingress.copilot.paths }}
- path: {{ .path }}
pathType: {{ .pathType }}
backend:
service:
name: {{ include "sim.fullname" $ }}-copilot
port:
number: {{ $.Values.copilot.server.service.port }}
{{- end }}
{{- end }}
{{- end }}

View File

@@ -0,0 +1,83 @@
{{- if and .Values.copilot.enabled .Values.copilot.migrations.enabled }}
apiVersion: batch/v1
kind: Job
metadata:
name: {{ include "sim.fullname" . }}-copilot-migrations
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot-migrations
annotations:
"helm.sh/hook": post-install,post-upgrade
"helm.sh/hook-weight": "-5"
"helm.sh/hook-delete-policy": before-hook-creation
spec:
backoffLimit: {{ .Values.copilot.migrations.backoffLimit }}
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot-migrations
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
restartPolicy: {{ .Values.copilot.migrations.restartPolicy }}
{{- with .Values.copilot.migrations.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.copilot.server.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- if .Values.copilot.postgresql.enabled }}
initContainers:
- name: wait-for-postgres
image: postgres:16-alpine
command:
- /bin/sh
- -c
- |
until pg_isready -h {{ include "sim.fullname" . }}-copilot-postgresql -p {{ .Values.copilot.postgresql.service.port }} -U {{ .Values.copilot.postgresql.auth.username }}; do
echo "Waiting for Copilot PostgreSQL to be ready..."
sleep 2
done
echo "Copilot PostgreSQL is ready!"
envFrom:
- secretRef:
name: {{ include "sim.fullname" . }}-copilot-postgresql-secret
{{- with .Values.copilot.migrations.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
containers:
- name: migrations
image: {{ include "sim.image" (dict "context" . "image" .Values.copilot.migrations.image) }}
imagePullPolicy: {{ .Values.copilot.migrations.image.pullPolicy }}
command: ["/usr/local/bin/migrate"]
envFrom:
- secretRef:
name: {{ include "sim.copilot.envSecretName" . }}
- secretRef:
name: {{ include "sim.copilot.databaseSecretName" . }}
{{- with .Values.copilot.server.extraEnvFrom }}
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.server.extraEnv }}
env:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.migrations.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.migrations.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- end }}
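Because the job runs as a post-install/post-upgrade hook with a before-hook-creation delete policy, the most recent run stays around for inspection after a failed upgrade (release name sim and namespace sim assumed):

    kubectl get jobs -n sim
    kubectl logs -n sim job/sim-copilot-migrations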

View File

@@ -50,3 +50,32 @@ spec:
{{- include "sim.realtime.selectorLabels" $ | nindent 6 }}
{{- end }}
{{- end }}
{{- if and .Values.copilot.enabled .Values.copilot.server.podDisruptionBudget.enabled }}
{{- with .Values.copilot.server.podDisruptionBudget }}
---
apiVersion: policy/v1
kind: PodDisruptionBudget
metadata:
name: {{ include "sim.fullname" $ }}-copilot-pdb
namespace: {{ $.Release.Namespace }}
labels:
{{- include "sim.labels" $ | nindent 4 }}
app.kubernetes.io/component: copilot
spec:
{{- if .minAvailable }}
minAvailable: {{ .minAvailable }}
{{- else if .maxUnavailable }}
maxUnavailable: {{ .maxUnavailable }}
{{- else }}
maxUnavailable: 1
{{- end }}
{{- if .unhealthyPodEvictionPolicy }}
unhealthyPodEvictionPolicy: {{ .unhealthyPodEvictionPolicy }}
{{- end }}
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sim.name" $ }}
app.kubernetes.io/instance: {{ $.Release.Name }}
app.kubernetes.io/component: copilot
{{- end }}
{{- end }}
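The budget only renders when explicitly enabled; with the HA replica count from the example values, a typical override would be:

    copilot:
      server:
        replicaCount: 2
        podDisruptionBudget:
          enabled: true
          minAvailable: 1   # template falls back to maxUnavailable: 1 if neither field is set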

View File

@@ -0,0 +1,37 @@
{{- if and .Values.copilot.enabled .Values.copilot.server.secret.create }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "sim.copilot.envSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot
{{- with .Values.copilot.server.secret.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
stringData:
{{- range $key, $value := .Values.copilot.server.env }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- end }}
{{- if and .Values.copilot.enabled (not .Values.copilot.postgresql.enabled) (or (not .Values.copilot.database.existingSecretName) (eq .Values.copilot.database.existingSecretName "")) (ne .Values.copilot.database.url "") }}
---
apiVersion: v1
kind: Secret
metadata:
name: {{ include "sim.copilot.databaseSecretName" . }}
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot
{{- with .Values.copilot.server.secret.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
type: Opaque
stringData:
{{ include "sim.copilot.databaseSecretKey" . }}: {{ required "copilot.database.url is required when using an external database" .Values.copilot.database.url | quote }}
{{- end }}

View File

@@ -0,0 +1,134 @@
{{- if and .Values.copilot.enabled .Values.copilot.postgresql.enabled }}
apiVersion: v1
kind: Secret
metadata:
name: {{ include "sim.fullname" . }}-copilot-postgresql-secret
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot-postgresql
type: Opaque
stringData:
POSTGRES_USER: {{ .Values.copilot.postgresql.auth.username | quote }}
POSTGRES_PASSWORD: {{ required "copilot.postgresql.auth.password is required when copilot is enabled" .Values.copilot.postgresql.auth.password | quote }}
POSTGRES_DB: {{ .Values.copilot.postgresql.auth.database | quote }}
DATABASE_URL: "postgresql://{{ .Values.copilot.postgresql.auth.username }}:{{ .Values.copilot.postgresql.auth.password }}@{{ include "sim.fullname" . }}-copilot-postgresql:{{ .Values.copilot.postgresql.service.port }}/{{ .Values.copilot.postgresql.auth.database }}"
---
apiVersion: v1
kind: Service
metadata:
name: {{ include "sim.fullname" . }}-copilot-postgresql
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot-postgresql
spec:
type: {{ .Values.copilot.postgresql.service.type }}
ports:
- port: {{ .Values.copilot.postgresql.service.port }}
targetPort: {{ .Values.copilot.postgresql.service.targetPort }}
protocol: TCP
name: postgresql
selector:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot-postgresql
---
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "sim.fullname" . }}-copilot-postgresql
namespace: {{ .Release.Namespace }}
labels:
{{- include "sim.labels" . | nindent 4 }}
app.kubernetes.io/component: copilot-postgresql
spec:
serviceName: {{ include "sim.fullname" . }}-copilot-postgresql
replicas: 1
selector:
matchLabels:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot-postgresql
template:
metadata:
labels:
app.kubernetes.io/name: {{ include "sim.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/component: copilot-postgresql
{{- with .Values.podLabels }}
{{- toYaml . | nindent 8 }}
{{- end }}
spec:
{{- with .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.copilot.postgresql.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.copilot.postgresql.podSecurityContext }}
securityContext:
{{- toYaml . | nindent 8 }}
{{- end }}
containers:
- name: postgresql
image: {{ include "sim.image" (dict "context" . "image" .Values.copilot.postgresql.image) }}
imagePullPolicy: {{ .Values.copilot.postgresql.image.pullPolicy }}
ports:
- name: postgresql
containerPort: {{ .Values.copilot.postgresql.service.targetPort }}
protocol: TCP
envFrom:
- secretRef:
name: {{ include "sim.fullname" . }}-copilot-postgresql-secret
{{- if .Values.copilot.postgresql.livenessProbe }}
livenessProbe:
{{- toYaml .Values.copilot.postgresql.livenessProbe | nindent 12 }}
{{- end }}
{{- if .Values.copilot.postgresql.readinessProbe }}
readinessProbe:
{{- toYaml .Values.copilot.postgresql.readinessProbe | nindent 12 }}
{{- end }}
{{- with .Values.copilot.postgresql.resources }}
resources:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- with .Values.copilot.postgresql.securityContext }}
securityContext:
{{- toYaml . | nindent 12 }}
{{- end }}
{{- if .Values.copilot.postgresql.persistence.enabled }}
volumeMounts:
- name: data
mountPath: /var/lib/postgresql/data
subPath: pgdata
{{- end }}
{{- if .Values.copilot.postgresql.persistence.enabled }}
volumeClaimTemplates:
- metadata:
name: data
labels:
{{- include "sim.labels" . | nindent 10 }}
app.kubernetes.io/component: copilot-postgresql
spec:
accessModes:
{{- range .Values.copilot.postgresql.persistence.accessModes }}
- {{ . | quote }}
{{- end }}
{{- if .Values.copilot.postgresql.persistence.storageClass }}
{{- if (eq "-" .Values.copilot.postgresql.persistence.storageClass) }}
storageClassName: ""
{{- else }}
storageClassName: {{ .Values.copilot.postgresql.persistence.storageClass | quote }}
{{- end }}
{{- else if .Values.global.storageClass }}
storageClassName: {{ .Values.global.storageClass | quote }}
{{- end }}
resources:
requests:
storage: {{ .Values.copilot.postgresql.persistence.size | quote }}
{{- end }}
{{- end }}
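To point Copilot at a managed Postgres instead of this StatefulSet, disable the internal database and supply either an inline URL (rendered into a chart-managed Secret) or an existing secret reference; all values below are placeholders:

    copilot:
      enabled: true
      postgresql:
        enabled: false
      database:
        url: "postgresql://copilot:CHANGE_ME@db.example.com:5432/copilot"
        # or, instead of url:
        # existingSecretName: my-copilot-db
        # secretKey: DATABASE_URL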

View File

@@ -642,6 +642,186 @@
}
}
},
"copilot": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable the Copilot microservice"
},
"server": {
"type": "object",
"properties": {
"replicaCount": {
"type": "integer",
"minimum": 1,
"description": "Number of Copilot replicas"
},
"image": {
"type": "object",
"properties": {
"repository": {
"type": "string",
"description": "Copilot image repository"
},
"tag": {
"type": "string",
"description": "Copilot image tag"
},
"pullPolicy": {
"type": "string",
"enum": ["Always", "IfNotPresent", "Never"],
"description": "Image pull policy"
}
}
},
"resources": {
"type": "object",
"properties": {
"limits": { "type": "object" },
"requests": { "type": "object" }
}
},
"nodeSelector": {
"type": "object",
"additionalProperties": { "type": "string" }
},
"env": {
"type": "object",
"additionalProperties": { "type": "string" },
"description": "Environment variables for Copilot"
},
"extraEnv": {
"type": "array",
"items": { "type": "object" },
"description": "Additional environment variable definitions"
},
"extraEnvFrom": {
"type": "array",
"items": { "type": "object" },
"description": "Additional envFrom sources"
},
"secret": {
"type": "object",
"properties": {
"create": {
"type": "boolean",
"description": "Whether to create a secret from copilot.server.env"
},
"name": {
"type": "string",
"description": "Override name for the Copilot secret"
},
"annotations": {
"type": "object",
"additionalProperties": { "type": "string" },
"description": "Annotations added to the Copilot secret"
}
}
},
"service": {
"type": "object",
"properties": {
"type": { "type": "string" },
"port": { "type": "integer" },
"targetPort": { "type": "integer" }
}
},
"podDisruptionBudget": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable PodDisruptionBudget for Copilot server"
},
"minAvailable": {
"type": "integer",
"description": "Minimum number of available pods"
},
"maxUnavailable": {
"type": "integer",
"description": "Maximum number of unavailable pods"
}
}
}
}
},
"postgresql": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Deploy an internal PostgreSQL instance for Copilot"
},
"auth": {
"type": "object",
"properties": {
"username": { "type": "string" },
"password": { "type": "string" },
"database": { "type": "string" }
}
},
"persistence": {
"type": "object",
"properties": {
"enabled": { "type": "boolean" },
"size": { "type": "string" },
"storageClass": { "type": "string" }
}
}
}
},
"database": {
"type": "object",
"properties": {
"existingSecretName": {
"type": "string",
"description": "Existing secret containing the Copilot DATABASE_URL"
},
"secretKey": {
"type": "string",
"description": "Key name inside the database secret"
},
"url": {
"type": "string",
"description": "External database connection string"
}
}
},
"migrations": {
"type": "object",
"properties": {
"enabled": {
"type": "boolean",
"description": "Enable the Copilot migration job"
},
"image": {
"type": "object",
"properties": {
"repository": { "type": "string" },
"tag": { "type": "string" },
"pullPolicy": { "type": "string" }
}
},
"resources": {
"type": "object",
"properties": {
"limits": { "type": "object" },
"requests": { "type": "object" }
}
},
"backoffLimit": {
"type": "integer",
"minimum": 0
},
"restartPolicy": {
"type": "string",
"enum": ["Never", "OnFailure"]
}
}
}
}
},
"ingress": {
"type": "object",
"properties": {
@@ -673,6 +853,27 @@
}
}
},
"copilot": {
"type": "object",
"properties": {
"host": {
"type": "string",
"format": "hostname",
"description": "Copilot service hostname"
},
"paths": {
"type": "array",
"items": {
"type": "object",
"properties": {
"path": { "type": "string" },
"pathType": { "type": "string" }
}
},
"description": "Ingress paths for Copilot service"
}
}
},
"tls": {
"type": "object",
"properties": {

View File

@@ -744,4 +744,212 @@ telemetry:
enabled: false
endpoint: "http://otlp-collector:4317"
tls:
  enabled: false
# Copilot service configuration (optional microservice)
copilot:
# Enable/disable the copilot service
enabled: false
# Server deployment configuration
server:
# Image configuration
image:
repository: simstudioai/copilot
tag: latest
pullPolicy: Always
# Number of replicas
replicaCount: 1
# Resource limits and requests
resources:
limits:
memory: "2Gi"
cpu: "1000m"
requests:
memory: "1Gi"
cpu: "500m"
# Node selector for pod scheduling
# Leave empty to run on same infrastructure as main Sim platform
# Or specify labels to isolate on dedicated nodes: { "workload-type": "copilot" }
nodeSelector: {}
# Pod security context
podSecurityContext:
fsGroup: 1001
# Container security context
securityContext:
runAsNonRoot: true
runAsUser: 1001
# Environment variables (required and optional)
env:
PORT: "8080"
SERVICE_NAME: "copilot"
ENVIRONMENT: "production"
AGENT_API_DB_ENCRYPTION_KEY: ""
INTERNAL_API_SECRET: ""
LICENSE_KEY: ""
OPENAI_API_KEY_1: ""
ANTHROPIC_API_KEY_1: ""
SIM_BASE_URL: ""
SIM_AGENT_API_KEY: ""
REDIS_URL: ""
# Optional configuration
LOG_LEVEL: "info"
CORS_ALLOWED_ORIGINS: ""
OTEL_EXPORTER_OTLP_ENDPOINT: ""
# Optional: additional static environment variables
extraEnv: []
# Optional: references to existing ConfigMaps/Secrets
extraEnvFrom: []
# Secret generation configuration (set create=false to use an existing secret)
secret:
create: true
name: ""
annotations: {}
# Service configuration
service:
type: ClusterIP
port: 8080
targetPort: 8080
# Health checks
readinessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 5
periodSeconds: 10
timeoutSeconds: 5
failureThreshold: 3
livenessProbe:
httpGet:
path: /healthz
port: 8080
initialDelaySeconds: 15
periodSeconds: 30
timeoutSeconds: 5
failureThreshold: 3
# Pod Disruption Budget for high availability
podDisruptionBudget:
enabled: false
minAvailable: 1
# PostgreSQL database for copilot (separate from main Sim database)
postgresql:
# Enable/disable internal PostgreSQL for copilot
enabled: true
# Image configuration
image:
repository: postgres
tag: 16-alpine
pullPolicy: IfNotPresent
# Authentication configuration
auth:
username: copilot
password: "" # REQUIRED - set via --set flag or external secret manager
database: copilot
# Node selector for database pod scheduling
# Leave empty to run on same infrastructure as main Sim platform
# Or specify labels to isolate on dedicated nodes: { "workload-type": "copilot" }
nodeSelector: {}
# Resource limits and requests
resources:
limits:
memory: "1Gi"
cpu: "500m"
requests:
memory: "512Mi"
cpu: "250m"
# Pod security context
podSecurityContext:
fsGroup: 999
# Container security context
securityContext:
runAsUser: 999
# Persistence configuration
persistence:
enabled: true
storageClass: ""
size: 10Gi
accessModes:
- ReadWriteOnce
# Service configuration
service:
type: ClusterIP
port: 5432
targetPort: 5432
# Health checks
livenessProbe:
exec:
command: ["pg_isready", "-U", "copilot", "-d", "copilot"]
initialDelaySeconds: 10
periodSeconds: 5
timeoutSeconds: 5
failureThreshold: 10
readinessProbe:
exec:
command: ["pg_isready", "-U", "copilot", "-d", "copilot"]
initialDelaySeconds: 5
periodSeconds: 3
timeoutSeconds: 5
failureThreshold: 10
# External database configuration (use when connecting to a managed database)
database:
existingSecretName: ""
secretKey: DATABASE_URL
url: ""
# Migration job configuration
migrations:
# Enable/disable migrations job
enabled: true
# Image configuration (same as server)
image:
repository: simstudioai/copilot
tag: latest
pullPolicy: Always
# Resource limits and requests
resources:
limits:
memory: "512Mi"
cpu: "500m"
requests:
memory: "256Mi"
cpu: "100m"
# Pod security context
podSecurityContext:
fsGroup: 1001
# Container security context
securityContext:
runAsNonRoot: true
runAsUser: 1001
# Job configuration
backoffLimit: 3
restartPolicy: OnFailure
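Putting the defaults above together, the smallest useful override for turning the service on might look like the following; every secret value is a placeholder to be replaced:

    # copilot-values.yaml (minimal; see the validation helper for the required set)
    copilot:
      enabled: true
      server:
        env:
          AGENT_API_DB_ENCRYPTION_KEY: "<openssl rand -hex 32>"
          INTERNAL_API_SECRET: "<reuse Sim INTERNAL_API_SECRET>"
          LICENSE_KEY: "<provided by Sim team>"
          OPENAI_API_KEY_1: "<provider key; or set ANTHROPIC_API_KEY_1>"
          SIM_BASE_URL: "https://sim.example.com"
          SIM_AGENT_API_KEY: "<must match Sim-side COPILOT_API_KEY>"
          REDIS_URL: "redis://default:password@redis:6379"
      postgresql:
        auth:
          password: "<strong password>"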