v0.3.7: ms teams webhook, docker fixes, condition block dropdown, fixed routing with workflow block

This commit is contained in:
Waleed Latif
2025-07-21 17:20:31 -07:00
committed by GitHub
35 changed files with 1223 additions and 155 deletions

View File

@@ -91,6 +91,12 @@ docker compose -f docker-compose.prod.yml up -d
### Option 4: Manual Setup
**Requirements:**
- [Bun](https://bun.sh/) runtime
- PostgreSQL 12+ with [pgvector extension](https://github.com/pgvector/pgvector) (required for AI embeddings)
**Note:** Sim Studio uses vector embeddings for AI features like knowledge bases and semantic search, which require the `pgvector` PostgreSQL extension.
1. Clone and install dependencies:
```bash
@@ -99,20 +105,43 @@ cd sim
bun install
```
2. Set up environment:
2. Set up PostgreSQL with pgvector:
You need PostgreSQL with the `vector` extension for embedding support. Choose one option:
**Option A: Using Docker (Recommended)**
```bash
# Start PostgreSQL with pgvector extension
docker run --name simstudio-db \
-e POSTGRES_PASSWORD=your_password \
-e POSTGRES_DB=simstudio \
-p 5432:5432 -d \
pgvector/pgvector:pg17
```
**Option B: Manual Installation**
- Install PostgreSQL 12+ and the pgvector extension
- See [pgvector installation guide](https://github.com/pgvector/pgvector#installation)
3. Set up environment:
```bash
cd apps/sim
cp .env.example .env # Configure with required variables (DATABASE_URL, BETTER_AUTH_SECRET, BETTER_AUTH_URL)
```
3. Set up the database:
Update your `.env` file with the database URL:
```bash
bunx drizzle-kit push
DATABASE_URL="postgresql://postgres:your_password@localhost:5432/simstudio"
```
4. Start the development servers:
4. Set up the database:
```bash
bunx drizzle-kit migrate
```
5. Start the development servers:
**Recommended approach - run both servers together (from project root):**

View File

@@ -1,6 +1,9 @@
import { NextResponse } from 'next/server'
import { createLogger } from '@/lib/logs/console-logger'
import { getConfluenceCloudId } from '@/tools/confluence/utils'
const logger = createLogger('ConfluencePages')
export const dynamic = 'force-dynamic'
export async function POST(request: Request) {
@@ -39,7 +42,7 @@ export async function POST(request: Request) {
const queryString = queryParams.toString()
const url = queryString ? `${baseUrl}?${queryString}` : baseUrl
console.log(`Fetching Confluence pages from: ${url}`)
logger.info(`Fetching Confluence pages from: ${url}`)
// Make the request to Confluence API with OAuth Bearer token
const response = await fetch(url, {
@@ -50,23 +53,23 @@ export async function POST(request: Request) {
},
})
console.log('Response status:', response.status, response.statusText)
logger.info('Response status:', response.status, response.statusText)
if (!response.ok) {
console.error(`Confluence API error: ${response.status} ${response.statusText}`)
logger.error(`Confluence API error: ${response.status} ${response.statusText}`)
let errorMessage
try {
const errorData = await response.json()
console.error('Error details:', JSON.stringify(errorData, null, 2))
logger.error('Error details:', JSON.stringify(errorData, null, 2))
errorMessage = errorData.message || `Failed to fetch Confluence pages (${response.status})`
} catch (e) {
console.error('Could not parse error response as JSON:', e)
logger.error('Could not parse error response as JSON:', e)
// Try to get the response text for more context
try {
const text = await response.text()
console.error('Response text:', text)
logger.error('Response text:', text)
errorMessage = `Failed to fetch Confluence pages: ${response.status} ${response.statusText}`
} catch (_textError) {
errorMessage = `Failed to fetch Confluence pages: ${response.status} ${response.statusText}`
@@ -77,13 +80,13 @@ export async function POST(request: Request) {
}
const data = await response.json()
console.log('Confluence API response:', `${JSON.stringify(data, null, 2).substring(0, 300)}...`)
console.log(`Found ${data.results?.length || 0} pages`)
logger.info('Confluence API response:', `${JSON.stringify(data, null, 2).substring(0, 300)}...`)
logger.info(`Found ${data.results?.length || 0} pages`)
if (data.results && data.results.length > 0) {
console.log('First few pages:')
logger.info('First few pages:')
for (const page of data.results.slice(0, 3)) {
console.log(`- ${page.id}: ${page.title}`)
logger.info(`- ${page.id}: ${page.title}`)
}
}
@@ -99,7 +102,7 @@ export async function POST(request: Request) {
})),
})
} catch (error) {
console.error('Error fetching Confluence pages:', error)
logger.error('Error fetching Confluence pages:', error)
return NextResponse.json(
{ error: (error as Error).message || 'Internal server error' },
{ status: 500 }

View File

@@ -465,6 +465,58 @@ export async function GET(request: NextRequest) {
})
}
case 'microsoftteams': {
const hmacSecret = providerConfig.hmacSecret
if (!hmacSecret) {
logger.warn(`[${requestId}] Microsoft Teams webhook missing HMAC secret: ${webhookId}`)
return NextResponse.json(
{ success: false, error: 'Microsoft Teams webhook requires HMAC secret' },
{ status: 400 }
)
}
logger.info(`[${requestId}] Microsoft Teams webhook test successful: ${webhookId}`)
return NextResponse.json({
success: true,
webhook: {
id: foundWebhook.id,
url: webhookUrl,
isActive: foundWebhook.isActive,
},
message: 'Microsoft Teams outgoing webhook configuration is valid.',
setup: {
url: webhookUrl,
hmacSecretConfigured: !!hmacSecret,
instructions: [
'Create an outgoing webhook in Microsoft Teams',
'Set the callback URL to the webhook URL above',
'Copy the HMAC security token to the configuration',
'Users can trigger the webhook by @mentioning it in Teams',
],
},
test: {
curlCommand: `curl -X POST "${webhookUrl}" \\
-H "Content-Type: application/json" \\
-H "Authorization: HMAC <signature>" \\
-d '{"type":"message","text":"Hello from Microsoft Teams!","from":{"id":"test","name":"Test User"}}'`,
samplePayload: {
type: 'message',
id: '1234567890',
timestamp: new Date().toISOString(),
text: 'Hello Sim Studio Bot!',
from: {
id: '29:1234567890abcdef',
name: 'Test User',
},
conversation: {
id: '19:meeting_abcdef@thread.v2',
},
},
},
})
}
default: {
// Generic webhook test
logger.info(`[${requestId}] Generic webhook test successful: ${webhookId}`)

View File

@@ -11,6 +11,7 @@ import {
processGenericDeduplication,
processWebhook,
processWhatsAppDeduplication,
validateMicrosoftTeamsSignature,
} from '@/lib/webhooks/utils'
import { loadWorkflowFromNormalizedTables } from '@/lib/workflows/db-helpers'
import { db } from '@/db'
@@ -243,6 +244,51 @@ export async function POST(
return slackChallengeResponse
}
// Handle Microsoft Teams outgoing webhook signature verification (must be done before timeout)
if (foundWebhook.provider === 'microsoftteams') {
const providerConfig = (foundWebhook.providerConfig as Record<string, any>) || {}
if (providerConfig.hmacSecret) {
const authHeader = request.headers.get('authorization')
if (!authHeader || !authHeader.startsWith('HMAC ')) {
logger.warn(
`[${requestId}] Microsoft Teams outgoing webhook missing HMAC authorization header`
)
return new NextResponse('Unauthorized - Missing HMAC signature', { status: 401 })
}
// Get the raw body for HMAC verification
const rawBody = await request.text()
const isValidSignature = validateMicrosoftTeamsSignature(
providerConfig.hmacSecret,
authHeader,
rawBody
)
if (!isValidSignature) {
logger.warn(`[${requestId}] Microsoft Teams HMAC signature verification failed`)
return new NextResponse('Unauthorized - Invalid HMAC signature', { status: 401 })
}
logger.debug(`[${requestId}] Microsoft Teams HMAC signature verified successfully`)
// Parse the body again since we consumed it for verification
try {
body = JSON.parse(rawBody)
} catch (parseError) {
logger.error(
`[${requestId}] Failed to parse Microsoft Teams webhook body after verification`,
{
error: parseError instanceof Error ? parseError.message : String(parseError),
}
)
return new NextResponse('Invalid JSON payload', { status: 400 })
}
}
}
// Skip processing if another instance is already handling this request
if (!hasExecutionLock) {
logger.info(`[${requestId}] Skipping execution as lock was not acquired`)

View File

@@ -2,9 +2,12 @@ import crypto from 'crypto'
import { and, desc, eq, isNull } from 'drizzle-orm'
import { NextResponse } from 'next/server'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { permissions, workflow, workflowBlocks, workspace } from '@/db/schema'
const logger = createLogger('Workspaces')
// Get all workspaces for the current user
export async function GET() {
const session = await getSession()
@@ -244,12 +247,12 @@ async function createWorkspace(userId: string, name: string) {
updatedAt: now,
})
console.log(
`Created workspace ${workspaceId} with initial workflow ${workflowId} for user ${userId}`
logger.info(
`Created workspace ${workspaceId} with initial workflow ${workflowId} for user ${userId}`
)
})
} catch (error) {
console.error(`Failed to create workspace ${workspaceId} with initial workflow:`, error)
logger.error(`Failed to create workspace ${workspaceId} with initial workflow:`, error)
throw error
}
@@ -276,7 +279,7 @@ async function migrateExistingWorkflows(userId: string, workspaceId: string) {
return // No orphaned workflows to migrate
}
console.log(
logger.info(
`Migrating ${orphanedWorkflows.length} workflows to workspace ${workspaceId} for user ${userId}`
)
@@ -308,6 +311,6 @@ async function ensureWorkflowsHaveWorkspace(userId: string, defaultWorkspaceId:
})
.where(and(eq(workflow.userId, userId), isNull(workflow.workspaceId)))
console.log(`Fixed ${orphanedWorkflows.length} orphaned workflows for user ${userId}`)
logger.info(`Fixed ${orphanedWorkflows.length} orphaned workflows for user ${userId}`)
}
}

View File

@@ -1,11 +1,14 @@
import { and, eq } from 'drizzle-orm'
import { notFound } from 'next/navigation'
import { getSession } from '@/lib/auth'
import { createLogger } from '@/lib/logs/console-logger'
import { db } from '@/db'
import { templateStars, templates } from '@/db/schema'
import type { Template } from '../templates'
import TemplateDetails from './template'
const logger = createLogger('TemplatePage')
interface TemplatePageProps {
params: Promise<{
workspaceId: string
@@ -58,7 +61,7 @@ export default async function TemplatePage({ params }: TemplatePageProps) {
// Validate that required fields are present
if (!template.id || !template.name || !template.author) {
console.error('Template missing required fields:', {
logger.error('Template missing required fields:', {
id: template.id,
name: template.name,
author: template.author,
@@ -100,9 +103,9 @@ export default async function TemplatePage({ params }: TemplatePageProps) {
isStarred,
}
console.log('Template from DB:', template)
console.log('Serialized template:', serializedTemplate)
console.log('Template state from DB:', template.state)
logger.info('Template from DB:', template)
logger.info('Serialized template:', serializedTemplate)
logger.info('Template state from DB:', template.state)
return (
<TemplateDetails

View File

@@ -143,7 +143,7 @@ export default function TemplateDetails({
const renderWorkflowPreview = () => {
// Follow the same pattern as deployed-workflow-card.tsx
if (!template?.state) {
console.log('Template has no state:', template)
logger.info('Template has no state:', template)
return (
<div className='flex h-full items-center justify-center text-center'>
<div className='text-muted-foreground'>
@@ -154,10 +154,10 @@ export default function TemplateDetails({
)
}
console.log('Template state:', template.state)
console.log('Template state type:', typeof template.state)
console.log('Template state blocks:', template.state.blocks)
console.log('Template state edges:', template.state.edges)
logger.info('Template state:', template.state)
logger.info('Template state type:', typeof template.state)
logger.info('Template state blocks:', template.state.blocks)
logger.info('Template state edges:', template.state.edges)
try {
return (

View File

@@ -92,7 +92,7 @@ export default function Templates({ initialTemplates, currentUserId }: Templates
const handleCreateNew = () => {
// TODO: Open create template modal or navigate to create page
console.log('Create new template')
logger.info('Create new template')
}
// Handle star change callback from template card

View File

@@ -17,11 +17,14 @@ import {
import { Input } from '@/components/ui/input'
import { ScrollArea } from '@/components/ui/scroll-area'
import { Tooltip, TooltipContent, TooltipTrigger } from '@/components/ui/tooltip'
import { createLogger } from '@/lib/logs/console-logger'
import { validateName } from '@/lib/utils'
import { useVariablesStore } from '@/stores/panel/variables/store'
import type { Variable, VariableType } from '@/stores/panel/variables/types'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('Variables')
export function Variables() {
const { activeWorkflowId, workflows } = useWorkflowRegistry()
const {
@@ -190,7 +193,7 @@ export function Variables() {
return undefined // Valid object
} catch (e) {
console.log('Object parsing error:', e)
logger.info('Object parsing error:', e)
return 'Invalid object syntax'
}
case 'array':
@@ -215,7 +218,7 @@ export function Variables() {
return undefined // Valid array
} catch (e) {
console.log('Array parsing error:', e)
logger.info('Array parsing error:', e)
return 'Invalid array syntax'
}
default:

View File

@@ -231,46 +231,6 @@ export function ConditionInput({
}
}, [])
// Update block value with trigger checks - handle both tag and env var triggers consistently
const updateBlockValue = (
blockId: string,
newValue: string,
textarea: HTMLTextAreaElement | null
) => {
if (isPreview || disabled) return
try {
setConditionalBlocks((blocks) =>
blocks.map((block) => {
if (block.id === blockId) {
const pos = textarea?.selectionStart ?? 0
const tagTrigger = checkTagTrigger(newValue, pos)
const envVarTrigger = checkEnvVarTrigger(newValue, pos)
// Check triggers for both tags and env vars
const lastCharTyped = newValue.charAt(pos - 1)
const shouldShowTags = tagTrigger.show || lastCharTyped === '<'
const shouldShowEnvVars = envVarTrigger.show || lastCharTyped === '$'
return {
...block,
value: newValue,
showTags: shouldShowTags,
showEnvVars: shouldShowEnvVars,
searchTerm: shouldShowEnvVars ? envVarTrigger.searchTerm : '',
cursorPosition: pos,
// Maintain activeSourceBlockId only when tags are showing
activeSourceBlockId: shouldShowTags ? block.activeSourceBlockId : null,
}
}
return block
})
)
} catch (error) {
logger.error('Error updating block value:', { error, blockId, newValue })
}
}
// Update the line counting logic to be block-specific
useEffect(() => {
if (!editorRef.current || conditionalBlocks.length === 0) return
@@ -541,9 +501,6 @@ export function ConditionInput({
})
}, [conditionalBlocks.length])
// Use preview value when in preview mode, otherwise use store value
const value = isPreview ? previewValue : storeValue
// Show loading or empty state if not ready or no blocks
if (!isReady || conditionalBlocks.length === 0) {
return (
@@ -698,11 +655,33 @@ export function ConditionInput({
<Editor
value={block.value}
onValueChange={(newCode) => {
if (!isPreview) {
if (!isPreview && !disabled) {
const textarea = editorRef.current?.querySelector(
`[data-block-id="${block.id}"] textarea`
)
updateBlockValue(block.id, newCode, textarea as HTMLTextAreaElement | null)
) as HTMLTextAreaElement | null
if (textarea) {
const pos = textarea.selectionStart ?? 0
const tagTrigger = checkTagTrigger(newCode, pos)
const envVarTrigger = checkEnvVarTrigger(newCode, pos)
setConditionalBlocks((blocks) =>
blocks.map((b) => {
if (b.id === block.id) {
return {
...b,
value: newCode,
showTags: tagTrigger.show,
showEnvVars: envVarTrigger.show,
searchTerm: envVarTrigger.show ? envVarTrigger.searchTerm : '',
cursorPosition: pos,
activeSourceBlockId: tagTrigger.show ? b.activeSourceBlockId : null,
}
}
return b
})
)
}
}
}}
onKeyDown={(e) => {

View File

@@ -13,6 +13,7 @@ import {
CommandList,
} from '@/components/ui/command'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
import { createLogger } from '@/lib/logs/console-logger'
import {
type Credential,
getProviderIdFromServiceId,
@@ -21,6 +22,8 @@ import {
} from '@/lib/oauth'
import { OAuthRequiredModal } from '../../credential-selector/components/oauth-required-modal'
const logger = createLogger('ConfluenceFileSelector')
export interface ConfluenceFileInfo {
id: string
name: string
@@ -138,7 +141,7 @@ export function ConfluenceFileSelector({
}
}
} catch (error) {
console.error('Error fetching credentials:', error)
logger.error('Error fetching credentials:', error)
} finally {
setIsLoading(false)
}
@@ -205,7 +208,7 @@ export function ConfluenceFileSelector({
onFileInfoChange?.(data.file)
}
} catch (error) {
console.error('Error fetching page info:', error)
logger.error('Error fetching page info:', error)
setError((error as Error).message)
} finally {
setIsLoading(false)
@@ -247,7 +250,7 @@ export function ConfluenceFileSelector({
if (!tokenResponse.ok) {
const errorData = await tokenResponse.json()
console.error('Access token error:', errorData)
logger.error('Access token error:', errorData)
// If there's a token error, we might need to reconnect the account
setError('Authentication failed. Please reconnect your Confluence account.')
@@ -259,7 +262,7 @@ export function ConfluenceFileSelector({
const accessToken = tokenData.accessToken
if (!accessToken) {
console.error('No access token returned')
logger.error('No access token returned')
setError('Authentication failed. Please reconnect your Confluence account.')
setIsLoading(false)
return
@@ -281,12 +284,12 @@ export function ConfluenceFileSelector({
if (!response.ok) {
const errorData = await response.json()
console.error('Confluence API error:', errorData)
logger.error('Confluence API error:', errorData)
throw new Error(errorData.error || 'Failed to fetch pages')
}
const data = await response.json()
console.log(`Received ${data.files?.length || 0} files from API`)
logger.info(`Received ${data.files?.length || 0} files from API`)
setFiles(data.files || [])
// If we have a selected file ID, find the file info
@@ -301,7 +304,7 @@ export function ConfluenceFileSelector({
}
}
} catch (error) {
console.error('Error fetching pages:', error)
logger.error('Error fetching pages:', error)
setError((error as Error).message)
setFiles([])
} finally {

View File

@@ -1,4 +1,5 @@
import { useCallback, useEffect, useState } from 'react'
import { logger } from '@trigger.dev/sdk/v3'
import { PlusIcon, WrenchIcon, XIcon } from 'lucide-react'
import { Button } from '@/components/ui/button'
import { Popover, PopoverContent, PopoverTrigger } from '@/components/ui/popover'
@@ -684,34 +685,25 @@ export function ToolInput({
}
const handleOperationChange = (toolIndex: number, operation: string) => {
console.log('🔄 handleOperationChange called:', { toolIndex, operation, isPreview, disabled })
if (isPreview || disabled) {
console.log('❌ Early return: preview or disabled')
logger.info('❌ Early return: preview or disabled')
return
}
const tool = selectedTools[toolIndex]
console.log('🔧 Current tool:', tool)
const newToolId = getToolIdForOperation(tool.type, operation)
console.log('🆔 getToolIdForOperation result:', { toolType: tool.type, operation, newToolId })
if (!newToolId) {
console.log('❌ Early return: no newToolId')
logger.info('❌ Early return: no newToolId')
return
}
// Get parameters for the new tool
const toolParams = getToolParametersConfig(newToolId, tool.type)
console.log('📋 getToolParametersConfig result:', {
newToolId,
toolType: tool.type,
toolParams,
})
if (!toolParams) {
console.log('❌ Early return: no toolParams')
logger.info('❌ Early return: no toolParams')
return
}

View File

@@ -0,0 +1,130 @@
import { Shield, Terminal } from 'lucide-react'
import { Alert, AlertDescription, AlertTitle } from '@/components/ui/alert'
import { CodeBlock } from '@/components/ui/code-block'
import { Input } from '@/components/ui/input'
import { ConfigField } from '../ui/config-field'
import { ConfigSection } from '../ui/config-section'
import { InstructionsSection } from '../ui/instructions-section'
import { TestResultDisplay } from '../ui/test-result'
/** Props for the Microsoft Teams outgoing-webhook configuration panel. */
interface MicrosoftTeamsConfigProps {
  // HMAC security token issued by Teams when the outgoing webhook is created.
  hmacSecret: string
  // Setter invoked with the new value on every edit of the HMAC secret input.
  setHmacSecret: (secret: string) => void
  // When true, the HMAC secret input is disabled (token data still loading).
  isLoadingToken: boolean
  // Result of the most recent webhook test, or null if no test has been run.
  testResult: {
    success: boolean
    message?: string
    test?: any
  } | null
  // Identifier of the value last copied to the clipboard (for copy feedback),
  // or null — presumably set/cleared by the parent modal; confirm against caller.
  copied: string | null
  // Copies `text` to the clipboard, tagging it with `type` for feedback display.
  copyToClipboard: (text: string, type: string) => void
  // Triggers a test request against the configured webhook endpoint.
  testWebhook: () => Promise<void>
}
// Representative payload Microsoft Teams POSTs to an outgoing webhook.
// Key order matters for display: JSON.stringify preserves insertion order.
const teamsSampleMessage = {
  type: 'message',
  id: '1234567890',
  timestamp: '2023-01-01T00:00:00.000Z',
  localTimestamp: '2023-01-01T00:00:00.000Z',
  serviceUrl: 'https://smba.trafficmanager.net/amer/',
  channelId: 'msteams',
  from: {
    id: '29:1234567890abcdef',
    name: 'John Doe',
  },
  conversation: {
    id: '19:meeting_abcdef@thread.v2',
  },
  text: 'Hello Sim Studio Bot!',
}

// Pretty-printed (2-space indent) JSON shown in the instructions code block.
const teamsWebhookExample = JSON.stringify(teamsSampleMessage, null, 2)
/**
 * Configuration panel for a Microsoft Teams outgoing webhook.
 *
 * Renders the HMAC-secret input, the most recent test result, step-by-step
 * setup instructions, a sample inbound payload, and security/requirement
 * notices.
 *
 * NOTE(review): `testWebhook` is accepted but not referenced in this render —
 * presumably the test action lives in the shared modal; confirm before removal.
 */
export function MicrosoftTeamsConfig({
  hmacSecret,
  setHmacSecret,
  isLoadingToken,
  testResult,
  copied,
  copyToClipboard,
  testWebhook,
}: MicrosoftTeamsConfigProps) {
  return (
    <div className='space-y-4'>
      {/* HMAC secret entry — masked, disabled while token data is loading */}
      <ConfigSection title='Microsoft Teams Configuration'>
        <ConfigField
          id='teams-hmac-secret'
          label='HMAC Secret'
          description='The security token provided by Teams when creating an outgoing webhook. Used to verify request authenticity.'
        >
          <Input
            id='teams-hmac-secret'
            value={hmacSecret}
            onChange={(e) => setHmacSecret(e.target.value)}
            placeholder='Enter HMAC secret from Teams'
            disabled={isLoadingToken}
            type='password'
          />
        </ConfigField>
      </ConfigSection>

      <TestResultDisplay
        testResult={testResult}
        copied={copied}
        copyToClipboard={copyToClipboard}
        showCurlCommand={true}
      />

      <InstructionsSection
        title='Setting up Outgoing Webhook in Microsoft Teams'
        tip='Create an outgoing webhook in Teams to receive messages from Teams in Sim Studio.'
      >
        <ol className='list-inside list-decimal space-y-1'>
          <li>Open Microsoft Teams and go to the team where you want to add the webhook.</li>
          {/* Fixed: ellipsis character was lost, leaving empty parentheses */}
          <li>Click the three dots (...) next to the team name and select "Manage team".</li>
          <li>Go to the "Apps" tab and click "Create an outgoing webhook".</li>
          <li>Provide a name, description, and optionally a profile picture.</li>
          <li>Set the callback URL to your Sim Studio webhook URL (shown above).</li>
          <li>Copy the HMAC security token and paste it into the "HMAC Secret" field above.</li>
          <li>Click "Create" to finish setup.</li>
        </ol>
      </InstructionsSection>

      <InstructionsSection title='Receiving Messages from Teams'>
        <p>
          When users mention your webhook in Teams (using @mention), Teams will send a POST request
          to your Sim Studio webhook URL with a payload like this:
        </p>
        <CodeBlock language='json' code={teamsWebhookExample} className='mt-2 text-sm' />
        <ul className='mt-3 list-outside list-disc space-y-1 pl-4'>
          <li>Messages are triggered by @mentioning the webhook name in Teams.</li>
          <li>Requests include HMAC signature for authentication.</li>
          <li>You have 5 seconds to respond to the webhook request.</li>
        </ul>
      </InstructionsSection>

      <Alert>
        <Shield className='h-4 w-4' />
        <AlertTitle>Security</AlertTitle>
        <AlertDescription>
          The HMAC secret is used to verify that requests are actually coming from Microsoft Teams.
          Keep it secure and never share it publicly.
        </AlertDescription>
      </Alert>

      <Alert>
        <Terminal className='h-4 w-4' />
        <AlertTitle>Requirements</AlertTitle>
        <AlertDescription>
          <ul className='mt-1 list-outside list-disc space-y-1 pl-4'>
            <li>Your Sim Studio webhook URL must use HTTPS and be publicly accessible.</li>
            <li>Self-signed SSL certificates are not supported by Microsoft Teams.</li>
            <li>For local testing, use a tunneling service like ngrok or Cloudflare Tunnel.</li>
          </ul>
        </AlertDescription>
      </Alert>
    </div>
  )
}

View File

@@ -15,6 +15,7 @@ import { DiscordConfig } from './providers/discord'
import { GenericConfig } from './providers/generic'
import { GithubConfig } from './providers/github'
import { GmailConfig } from './providers/gmail'
import { MicrosoftTeamsConfig } from './providers/microsoftteams'
import { SlackConfig } from './providers/slack'
import { StripeConfig } from './providers/stripe'
import { TelegramConfig } from './providers/telegram'
@@ -79,6 +80,8 @@ export function WebhookModal({
const [discordAvatarUrl, setDiscordAvatarUrl] = useState('')
const [slackSigningSecret, setSlackSigningSecret] = useState('')
const [telegramBotToken, setTelegramBotToken] = useState('')
// Microsoft Teams-specific state
const [microsoftTeamsHmacSecret, setMicrosoftTeamsHmacSecret] = useState('')
// Airtable-specific state
const [airtableWebhookSecret, _setAirtableWebhookSecret] = useState('')
const [airtableBaseId, setAirtableBaseId] = useState('')
@@ -103,6 +106,7 @@ export function WebhookModal({
airtableTableId: '',
airtableIncludeCellValues: false,
telegramBotToken: '',
microsoftTeamsHmacSecret: '',
selectedLabels: ['INBOX'] as string[],
labelFilterBehavior: 'INCLUDE',
markAsRead: false,
@@ -259,6 +263,15 @@ export function WebhookModal({
includeRawEmail: config.includeRawEmail,
}))
}
} else if (webhookProvider === 'microsoftteams') {
const hmacSecret = config.hmacSecret || ''
setMicrosoftTeamsHmacSecret(hmacSecret)
setOriginalValues((prev) => ({
...prev,
microsoftTeamsHmacSecret: hmacSecret,
}))
}
}
}
@@ -303,7 +316,9 @@ export function WebhookModal({
!originalValues.selectedLabels.every((label) => selectedLabels.includes(label)) ||
labelFilterBehavior !== originalValues.labelFilterBehavior ||
markAsRead !== originalValues.markAsRead ||
includeRawEmail !== originalValues.includeRawEmail))
includeRawEmail !== originalValues.includeRawEmail)) ||
(webhookProvider === 'microsoftteams' &&
microsoftTeamsHmacSecret !== originalValues.microsoftTeamsHmacSecret)
setHasUnsavedChanges(hasChanges)
}, [
@@ -327,6 +342,7 @@ export function WebhookModal({
labelFilterBehavior,
markAsRead,
includeRawEmail,
microsoftTeamsHmacSecret,
])
// Validate required fields for current provider
@@ -354,6 +370,9 @@ export function WebhookModal({
case 'gmail':
isValid = selectedLabels.length > 0
break
case 'microsoftteams':
isValid = microsoftTeamsHmacSecret.trim() !== ''
break
}
setIsCurrentConfigValid(isValid)
}, [
@@ -364,6 +383,7 @@ export function WebhookModal({
whatsappVerificationToken,
telegramBotToken,
selectedLabels,
microsoftTeamsHmacSecret,
])
// Use the provided path or generate a UUID-based path
@@ -433,6 +453,10 @@ export function WebhookModal({
return {
botToken: telegramBotToken || undefined,
}
case 'microsoftteams':
return {
hmacSecret: microsoftTeamsHmacSecret,
}
default:
return {}
}
@@ -482,6 +506,7 @@ export function WebhookModal({
airtableTableId,
airtableIncludeCellValues,
telegramBotToken,
microsoftTeamsHmacSecret,
selectedLabels,
labelFilterBehavior,
markAsRead,
@@ -727,6 +752,18 @@ export function WebhookModal({
webhookUrl={webhookUrl}
/>
)
case 'microsoftteams':
return (
<MicrosoftTeamsConfig
hmacSecret={microsoftTeamsHmacSecret}
setHmacSecret={setMicrosoftTeamsHmacSecret}
isLoadingToken={isLoadingToken}
testResult={testResult}
copied={copied}
copyToClipboard={copyToClipboard}
testWebhook={testWebhook}
/>
)
default:
return (
<GenericConfig

View File

@@ -6,6 +6,7 @@ import {
DiscordIcon,
GithubIcon,
GmailIcon,
MicrosoftTeamsIcon,
SlackIcon,
StripeIcon,
TelegramIcon,
@@ -85,6 +86,10 @@ export interface TelegramConfig {
botToken?: string
}
export interface MicrosoftTeamsConfig {
hmacSecret: string
}
// Union type for all provider configurations
export type ProviderConfig =
| WhatsAppConfig
@@ -96,6 +101,7 @@ export type ProviderConfig =
| AirtableWebhookConfig
| TelegramConfig
| GmailConfig
| MicrosoftTeamsConfig
| Record<string, never>
// Define available webhook providers
@@ -280,6 +286,20 @@ export const WEBHOOK_PROVIDERS: { [key: string]: WebhookProvider } = {
},
},
},
microsoftteams: {
id: 'microsoftteams',
name: 'Microsoft Teams',
icon: (props) => <MicrosoftTeamsIcon {...props} />,
configFields: {
hmacSecret: {
type: 'string',
label: 'HMAC Secret',
placeholder: 'Enter HMAC secret from Teams outgoing webhook',
description:
'The security token provided by Teams when creating an outgoing webhook. Used to verify request authenticity.',
},
},
},
}
interface WebhookConfigProps {

View File

@@ -605,9 +605,9 @@ export function useWorkflowExecution() {
}
try {
console.log('Executing debug step with blocks:', pendingBlocks)
logger.info('Executing debug step with blocks:', pendingBlocks)
const result = await executor!.continueExecution(pendingBlocks, debugContext!)
console.log('Debug step execution result:', result)
logger.info('Debug step execution result:', result)
if (isDebugSessionComplete(result)) {
await handleDebugSessionComplete(result)
@@ -660,7 +660,7 @@ export function useWorkflowExecution() {
let currentContext = { ...debugContext! }
let currentPendingBlocks = [...pendingBlocks]
console.log('Starting resume execution with blocks:', currentPendingBlocks)
logger.info('Starting resume execution with blocks:', currentPendingBlocks)
// Continue execution until there are no more pending blocks
let iterationCount = 0

View File

@@ -402,7 +402,6 @@ const WorkflowContent = React.memo(() => {
}
const { type } = event.detail
console.log('🛠️ Adding block from toolbar:', type)
if (!type) return
if (type === 'connectionBlock') return

View File

@@ -4,12 +4,15 @@ import { useCallback, useEffect, useMemo, useState } from 'react'
import clsx from 'clsx'
import { useParams, usePathname } from 'next/navigation'
import { Skeleton } from '@/components/ui/skeleton'
import { createLogger } from '@/lib/logs/console-logger'
import { type FolderTreeNode, useFolderStore } from '@/stores/folders/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import type { WorkflowMetadata } from '@/stores/workflows/registry/types'
import { FolderItem } from './components/folder-item'
import { WorkflowItem } from './components/workflow-item'
const logger = createLogger('FolderTree')
interface FolderSectionProps {
folder: FolderTreeNode
level: number
@@ -282,9 +285,9 @@ function useDragHandlers(
for (const workflowId of workflowIds) {
await updateWorkflow(workflowId, { folderId: targetFolderId })
}
console.log(logMessage || `Moved ${workflowIds.length} workflow(s)`)
logger.info(logMessage || `Moved ${workflowIds.length} workflow(s)`)
} catch (error) {
console.error('Failed to move workflows:', error)
logger.error('Failed to move workflows:', error)
}
}
@@ -298,7 +301,7 @@ function useDragHandlers(
// Prevent circular references - don't allow dropping a folder into itself or its descendants
if (targetFolderId === folderIdData) {
console.log('Cannot move folder into itself')
logger.info('Cannot move folder into itself')
return
}
@@ -308,21 +311,21 @@ function useDragHandlers(
targetFolderId &&
draggedFolderPath.some((ancestor) => ancestor.id === targetFolderId)
) {
console.log('Cannot move folder into its own descendant')
logger.info('Cannot move folder into its own descendant')
return
}
// If target folder is already at level 1 (has 1 parent), we can't nest another folder
if (targetFolderPath.length >= 1) {
console.log('Cannot nest folder: Maximum 2 levels of nesting allowed. Drop prevented.')
logger.info('Cannot nest folder: Maximum 2 levels of nesting allowed. Drop prevented.')
return // Prevent the drop entirely
}
// Target folder is at root level, safe to nest
await updateFolder(folderIdData, { parentId: targetFolderId })
console.log(`Moved folder to ${targetFolderId ? `folder ${targetFolderId}` : 'root'}`)
logger.info(`Moved folder to ${targetFolderId ? `folder ${targetFolderId}` : 'root'}`)
} catch (error) {
console.error('Failed to move folder:', error)
logger.error('Failed to move folder:', error)
}
}
}
@@ -416,9 +419,9 @@ export function FolderTree({
for (const folder of deepFolders) {
try {
await updateFolderAPI(folder.id, { parentId: null })
console.log(`Moved deeply nested folder "${folder.name}" to root level`)
logger.info(`Moved deeply nested folder "${folder.name}" to root level`)
} catch (error) {
console.error(`Failed to move folder "${folder.name}":`, error)
logger.error(`Failed to move folder "${folder.name}":`, error)
}
}
}, [workspaceId])

View File

@@ -3,6 +3,7 @@ import {
DiscordIcon,
GithubIcon,
GmailIcon,
MicrosoftTeamsIcon,
SignalIcon,
SlackIcon,
StripeIcon,
@@ -23,6 +24,7 @@ const getWebhookProviderIcon = (provider: string) => {
github: GithubIcon,
discord: DiscordIcon,
stripe: StripeIcon,
microsoftteams: MicrosoftTeamsIcon,
}
return iconMap[provider.toLowerCase()]
@@ -52,6 +54,7 @@ export const WebhookBlock: BlockConfig = {
'github',
'discord',
'stripe',
'microsoftteams',
].map((provider) => {
const providerLabels = {
slack: 'Slack',
@@ -63,6 +66,7 @@ export const WebhookBlock: BlockConfig = {
github: 'GitHub',
discord: 'Discord',
stripe: 'Stripe',
microsoftteams: 'Microsoft Teams',
}
const icon = getWebhookProviderIcon(provider)

View File

@@ -805,7 +805,7 @@ describe('Executor', () => {
executedBlocks,
mockContext
)
expect(nonSelectedResult).toBe(true) // router executed + target NOT selected = dependency auto-met
expect(nonSelectedResult).toBe(false) // router executed + target NOT selected = dependency NOT met
})
test('should handle condition decisions correctly in dependency checking', () => {
@@ -837,7 +837,7 @@ describe('Executor', () => {
{ source: 'condition1', target: 'falseTarget', sourceHandle: 'condition-false' },
]
const falseResult = checkDependencies(falseConnections, executedBlocks, mockContext)
expect(falseResult).toBe(true) // condition executed + path NOT selected = dependency auto-met
expect(falseResult).toBe(false) // condition executed + path NOT selected = dependency NOT met
})
test('should handle regular sequential dependencies correctly', () => {

View File

@@ -1123,9 +1123,9 @@ export class Executor {
const conditionId = conn.sourceHandle.replace('condition-', '')
const selectedCondition = context.decisions.condition.get(conn.source)
// If source is executed and this is not the selected path, consider it met
// If source is executed and this is not the selected path, dependency is NOT met
if (sourceExecuted && selectedCondition && conditionId !== selectedCondition) {
return true
return false
}
// Otherwise, this dependency is met only if source is executed and this is the selected path
@@ -1137,9 +1137,9 @@ export class Executor {
if (sourceBlock?.metadata?.id === BlockType.ROUTER) {
const selectedTarget = context.decisions.router.get(conn.source)
// If source is executed and this is not the selected target, consider it met
// If source is executed and this is not the selected target, dependency is NOT met
if (sourceExecuted && selectedTarget && conn.target !== selectedTarget) {
return true
return false
}
// Otherwise, this dependency is met only if source is executed and this is the selected target

View File

@@ -7,6 +7,7 @@ describe('Routing', () => {
it.concurrent('should categorize flow control blocks correctly', () => {
expect(Routing.getCategory(BlockType.PARALLEL)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.LOOP)).toBe(BlockCategory.FLOW_CONTROL)
expect(Routing.getCategory(BlockType.WORKFLOW)).toBe(BlockCategory.FLOW_CONTROL)
})
it.concurrent('should categorize routing blocks correctly', () => {
@@ -19,6 +20,8 @@ describe('Routing', () => {
expect(Routing.getCategory(BlockType.AGENT)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.API)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.STARTER)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.RESPONSE)).toBe(BlockCategory.REGULAR_BLOCK)
expect(Routing.getCategory(BlockType.EVALUATOR)).toBe(BlockCategory.REGULAR_BLOCK)
})
it.concurrent('should default to regular block for unknown types', () => {
@@ -36,6 +39,7 @@ describe('Routing', () => {
it.concurrent('should return false for flow control blocks', () => {
expect(Routing.shouldActivateDownstream(BlockType.PARALLEL)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.LOOP)).toBe(false)
expect(Routing.shouldActivateDownstream(BlockType.WORKFLOW)).toBe(false)
})
it.concurrent('should return true for regular blocks', () => {
@@ -53,6 +57,7 @@ describe('Routing', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.requiresActivePathCheck(BlockType.PARALLEL)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.LOOP)).toBe(true)
expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {
@@ -75,6 +80,7 @@ describe('Routing', () => {
it.concurrent('should return true for flow control blocks', () => {
expect(Routing.shouldSkipInSelectiveActivation(BlockType.PARALLEL)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.LOOP)).toBe(true)
expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
})
it.concurrent('should return false for routing blocks', () => {

View File

@@ -7,31 +7,70 @@ export enum BlockCategory {
}
export interface RoutingBehavior {
shouldActivateDownstream: boolean
requiresActivePathCheck: boolean
skipInSelectiveActivation: boolean
shouldActivateDownstream: boolean // Whether this block should activate downstream blocks when it completes
requiresActivePathCheck: boolean // Whether this block's handler needs routing-aware logic (NOT universal path checking)
skipInSelectiveActivation: boolean // Whether to skip this block type during connection filtering in selective activation
}
/**
* Centralized routing strategy that defines how different block types
* should behave in the execution path system.
*
* IMPORTANT: This system works in conjunction with the executor's universal
* active path checking (executor/index.ts lines 992-994). The flags here
* control specialized behavior, not basic path enforcement.
*
* ## Execution Flow Architecture:
*
* 1. **Universal Path Check** (Executor Level):
* - ALL blocks are subject to `context.activeExecutionPath.has(block.id)`
* - This prevents unselected blocks from executing (fixes router bypass bug)
*
* 2. **Specialized Routing Behavior** (Handler Level):
* - Some block handlers need additional routing logic
* - Controlled by `requiresActivePathCheck` flag
*
* ## Block Categories Explained:
*
* ### ROUTING_BLOCK (Router, Condition)
* - **Role**: Decision makers that CREATE active execution paths
* - **Path Check**: NO - they must execute to make routing decisions
* - **Downstream**: YES - they activate their selected targets
* - **Selective**: NO - they participate in making routing decisions
*
* ### FLOW_CONTROL (Parallel, Loop, Workflow)
* - **Role**: Complex blocks that CONSUME routing decisions
* - **Path Check**: YES - their handlers need routing awareness for internal logic
* - **Downstream**: NO - they manage their own internal activation patterns
* - **Selective**: YES - skip them during connection filtering to prevent premature activation
*
* ### REGULAR_BLOCK (Function, Agent, API, etc.)
* - **Role**: Standard execution blocks with simple activation patterns
* - **Path Check**: NO - they rely on dependency logic and universal path checking
* - **Downstream**: YES - they activate all downstream blocks normally
* - **Selective**: NO - they participate in normal activation patterns
*
* ## Multi-Input Support:
* The dependency checking logic (executor/index.ts lines 1149-1153) allows blocks
* with multiple inputs to execute when ANY valid input is available, supporting
* scenarios like agents that reference multiple router destinations.
*/
export class Routing {
private static readonly BEHAVIOR_MAP: Record<BlockCategory, RoutingBehavior> = {
[BlockCategory.ROUTING_BLOCK]: {
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
shouldActivateDownstream: true, // Routing blocks activate their SELECTED targets (not all connected targets)
requiresActivePathCheck: false, // They don't need handler-level path checking - they CREATE the paths
skipInSelectiveActivation: false, // They participate in routing decisions, so don't skip during activation
},
[BlockCategory.FLOW_CONTROL]: {
shouldActivateDownstream: false,
requiresActivePathCheck: true,
skipInSelectiveActivation: true,
shouldActivateDownstream: false, // Flow control blocks manage their own complex internal activation
requiresActivePathCheck: true, // Their handlers need routing context for internal decision making
skipInSelectiveActivation: true, // Skip during selective activation to prevent bypassing routing decisions
},
[BlockCategory.REGULAR_BLOCK]: {
shouldActivateDownstream: true,
requiresActivePathCheck: false,
skipInSelectiveActivation: false,
shouldActivateDownstream: true, // Regular blocks activate all connected downstream blocks
requiresActivePathCheck: false, // They use universal path checking + dependency logic instead
skipInSelectiveActivation: false, // They participate in normal activation patterns
},
}
@@ -39,6 +78,7 @@ export class Routing {
// Flow control blocks
[BlockType.PARALLEL]: BlockCategory.FLOW_CONTROL,
[BlockType.LOOP]: BlockCategory.FLOW_CONTROL,
[BlockType.WORKFLOW]: BlockCategory.FLOW_CONTROL,
// Routing blocks
[BlockType.ROUTER]: BlockCategory.ROUTING_BLOCK,
@@ -50,7 +90,6 @@ export class Routing {
[BlockType.API]: BlockCategory.REGULAR_BLOCK,
[BlockType.EVALUATOR]: BlockCategory.REGULAR_BLOCK,
[BlockType.RESPONSE]: BlockCategory.REGULAR_BLOCK,
[BlockType.WORKFLOW]: BlockCategory.REGULAR_BLOCK,
[BlockType.STARTER]: BlockCategory.REGULAR_BLOCK,
}
@@ -67,16 +106,31 @@ export class Routing {
return Routing.getBehavior(blockType).shouldActivateDownstream
}
/**
* Determines if a block's HANDLER needs routing-aware logic.
* Note: This is NOT the same as universal path checking done by the executor.
*
* @param blockType The block type to check
* @returns true if the block handler should implement routing-aware behavior
*/
static requiresActivePathCheck(blockType: string): boolean {
return Routing.getBehavior(blockType).requiresActivePathCheck
}
/**
* Determines if a block type should be skipped during selective activation.
* Used to prevent certain block types from being prematurely activated
* when they should wait for explicit routing decisions.
*/
static shouldSkipInSelectiveActivation(blockType: string): boolean {
return Routing.getBehavior(blockType).skipInSelectiveActivation
}
/**
* Checks if a connection should be skipped during selective activation
* Checks if a connection should be skipped during selective activation.
*
* This prevents certain types of connections from triggering premature
* activation of blocks that should wait for explicit routing decisions.
*/
static shouldSkipConnection(sourceHandle: string | undefined, targetBlockType: string): boolean {
// Skip flow control specific connections (internal flow control handles)

View File

@@ -0,0 +1,253 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { Executor } from '@/executor/index'
import type { SerializedWorkflow } from '@/serializer/types'
// Regression suite for multi-input routing: agent-1 depends on BOTH router targets
// (function-1 and function-2), but only one branch ever runs. The dependency checker
// must treat the unselected, inactive source as satisfied so agent-1 still executes.
// NOTE(review): these tests reach into Executor private members via `(executor as any)`;
// they are coupled to the executor's internal context shape — confirm on refactor.
describe('Multi-Input Routing Scenarios', () => {
  let workflow: SerializedWorkflow
  let executor: Executor

  beforeEach(() => {
    // Diamond topology: start → router-1 → (function-1 | function-2) → agent-1.
    workflow = {
      version: '2.0',
      blocks: [
        {
          id: 'start',
          position: { x: 0, y: 0 },
          metadata: { id: BlockType.STARTER, name: 'Start' },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'router-1',
          position: { x: 150, y: 0 },
          metadata: { id: BlockType.ROUTER, name: 'Router 1' },
          config: {
            tool: BlockType.ROUTER,
            params: {
              prompt: 'if the input is x, go to function 1.\notherwise, go to function 2.\ny',
              model: 'gpt-4o',
            },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          position: { x: 300, y: -100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          config: {
            tool: BlockType.FUNCTION,
            params: { code: "return 'hi'" },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-2',
          position: { x: 300, y: 100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
          config: {
            tool: BlockType.FUNCTION,
            params: { code: "return 'bye'" },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          // Agent references outputs of BOTH functions — the multi-input target under test.
          id: 'agent-1',
          position: { x: 500, y: 0 },
          metadata: { id: BlockType.AGENT, name: 'Agent 1' },
          config: {
            tool: BlockType.AGENT,
            params: {
              systemPrompt: 'return the following in urdu roman english',
              userPrompt: '<function1.result>\n<function2.result>',
              model: 'gpt-4o',
            },
          },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'start', target: 'router-1' },
        { source: 'router-1', target: 'function-1' },
        { source: 'router-1', target: 'function-2' },
        { source: 'function-1', target: 'agent-1' }, // Agent depends on function-1
        { source: 'function-2', target: 'agent-1' }, // Agent depends on function-2
      ],
      loops: {},
      parallels: {},
    }
    executor = new Executor(workflow, {}, {})
  })

  it('should handle multi-input target when router selects function-1', async () => {
    // Test scenario: Router selects function-1, agent should still execute with function-1's output
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Step 1: Execute start block
    context.executedBlocks.add('start')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    // Step 2: Router selects function-1 (not function-2)
    context.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'function-1',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 1',
        },
      },
      executed: true,
      executionTime: 876,
    })
    context.executedBlocks.add('router-1')
    context.decisions.router.set('router-1', 'function-1')
    // Update execution paths after router-1
    const pathTracker = (executor as any).pathTracker
    pathTracker.updateExecutionPaths(['router-1'], context)
    // Verify only function-1 is active
    expect(context.activeExecutionPath.has('function-1')).toBe(true)
    expect(context.activeExecutionPath.has('function-2')).toBe(false)
    // Step 3: Execute function-1
    context.blockStates.set('function-1', {
      output: { result: 'hi', stdout: '' },
      executed: true,
      executionTime: 66,
    })
    context.executedBlocks.add('function-1')
    // Update paths after function-1
    pathTracker.updateExecutionPaths(['function-1'], context)
    // Step 4: Check agent-1 dependencies
    const agent1Connections = workflow.connections.filter((conn) => conn.target === 'agent-1')
    // Check dependencies for agent-1
    const agent1DependenciesMet = (executor as any).checkDependencies(
      agent1Connections,
      context.executedBlocks,
      context
    )
    // Step 5: Get next execution layer
    const nextLayer = (executor as any).getNextExecutionLayer(context)
    // CRITICAL TEST: Agent should be able to execute even though it has multiple inputs
    // The key is that the dependency logic should handle this correctly:
    // - function-1 executed and is selected → dependency met
    // - function-2 not executed and not selected → dependency considered met (inactive source)
    expect(agent1DependenciesMet).toBe(true)
    expect(nextLayer).toContain('agent-1')
  })

  it('should handle multi-input target when router selects function-2', async () => {
    // Test scenario: Router selects function-2, agent should still execute with function-2's output
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Step 1: Execute start and router-1 selecting function-2
    context.executedBlocks.add('start')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    context.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'function-2',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 2',
        },
      },
      executed: true,
      executionTime: 876,
    })
    context.executedBlocks.add('router-1')
    context.decisions.router.set('router-1', 'function-2')
    const pathTracker = (executor as any).pathTracker
    pathTracker.updateExecutionPaths(['router-1'], context)
    // Verify only function-2 is active
    expect(context.activeExecutionPath.has('function-1')).toBe(false)
    expect(context.activeExecutionPath.has('function-2')).toBe(true)
    // Step 2: Execute function-2
    context.blockStates.set('function-2', {
      output: { result: 'bye', stdout: '' },
      executed: true,
      executionTime: 66,
    })
    context.executedBlocks.add('function-2')
    pathTracker.updateExecutionPaths(['function-2'], context)
    // Step 3: Check agent-1 dependencies
    const agent1Connections = workflow.connections.filter((conn) => conn.target === 'agent-1')
    const agent1DependenciesMet = (executor as any).checkDependencies(
      agent1Connections,
      context.executedBlocks,
      context
    )
    // Step 4: Get next execution layer
    const nextLayer = (executor as any).getNextExecutionLayer(context)
    // CRITICAL TEST: Agent should execute with function-2's output
    expect(agent1DependenciesMet).toBe(true)
    expect(nextLayer).toContain('agent-1')
  })

  it('should verify the dependency logic for inactive sources', async () => {
    // This test specifically validates the multi-input dependency logic
    const context = (executor as any).createExecutionContext('test-workflow', new Date())
    // Setup: Router executed and selected function-1, function-1 executed
    context.executedBlocks.add('start')
    context.executedBlocks.add('router-1')
    context.executedBlocks.add('function-1')
    context.decisions.router.set('router-1', 'function-1')
    context.activeExecutionPath.add('start')
    context.activeExecutionPath.add('router-1')
    context.activeExecutionPath.add('function-1')
    context.activeExecutionPath.add('agent-1') // Agent should be active due to function-1
    // Test individual dependency checks
    const checkDependencies = (executor as any).checkDependencies.bind(executor)
    // Connection from function-1 (executed, selected) → should be met
    const function1Connection = [{ source: 'function-1', target: 'agent-1' }]
    const function1DepMet = checkDependencies(function1Connection, context.executedBlocks, context)
    // Connection from function-2 (not executed, not selected) → should be met because of inactive source logic
    const function2Connection = [{ source: 'function-2', target: 'agent-1' }]
    const function2DepMet = checkDependencies(function2Connection, context.executedBlocks, context)
    // Both connections together (the actual agent scenario)
    const bothConnections = [
      { source: 'function-1', target: 'agent-1' },
      { source: 'function-2', target: 'agent-1' },
    ]
    const bothDepMet = checkDependencies(bothConnections, context.executedBlocks, context)
    // CRITICAL ASSERTIONS:
    expect(function1DepMet).toBe(true) // Executed and active
    expect(function2DepMet).toBe(true) // Not in active path, so considered met (line 1151)
    expect(bothDepMet).toBe(true) // All dependencies should be met
  })
})

View File

@@ -0,0 +1,305 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { BlockType } from '@/executor/consts'
import { PathTracker } from '@/executor/path/path'
import { Routing } from '@/executor/routing/routing'
import type { ExecutionContext } from '@/executor/types'
import type { SerializedWorkflow } from '@/serializer/types'
// Regression suite for the router → workflow-block bug: workflow blocks were
// categorized as REGULAR_BLOCK, so selective activation could activate them even
// when a router had NOT selected them. After reclassifying WORKFLOW as
// FLOW_CONTROL, unselected workflow blocks must stay out of the active path.
// NOTE(review): uses PathTracker directly with a hand-built ExecutionContext;
// coupled to the context shape in executor/types — confirm on refactor.
describe('Router → Workflow Block Execution Fix', () => {
  let workflow: SerializedWorkflow
  let pathTracker: PathTracker
  let mockContext: ExecutionContext

  beforeEach(() => {
    // Chained-router topology:
    // starter → router-1 → (function-1 | router-2), router-2 → (function-2 | workflow-2).
    workflow = {
      version: '2.0',
      blocks: [
        {
          id: 'starter',
          position: { x: 0, y: 0 },
          metadata: { id: BlockType.STARTER, name: 'Start' },
          config: { tool: BlockType.STARTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'router-1',
          position: { x: 100, y: 0 },
          metadata: { id: BlockType.ROUTER, name: 'Router 1' },
          config: { tool: BlockType.ROUTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-1',
          position: { x: 200, y: -100 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 1' },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'router-2',
          position: { x: 200, y: 0 },
          metadata: { id: BlockType.ROUTER, name: 'Router 2' },
          config: { tool: BlockType.ROUTER, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          id: 'function-2',
          position: { x: 300, y: -50 },
          metadata: { id: BlockType.FUNCTION, name: 'Function 2' },
          config: { tool: BlockType.FUNCTION, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
        {
          // The block type whose premature activation this suite guards against.
          id: 'workflow-2',
          position: { x: 300, y: 50 },
          metadata: { id: BlockType.WORKFLOW, name: 'Workflow 2' },
          config: { tool: BlockType.WORKFLOW, params: {} },
          inputs: {},
          outputs: {},
          enabled: true,
        },
      ],
      connections: [
        { source: 'starter', target: 'router-1' },
        { source: 'router-1', target: 'function-1' },
        { source: 'router-1', target: 'router-2' },
        { source: 'router-2', target: 'function-2' },
        { source: 'router-2', target: 'workflow-2' },
      ],
      loops: {},
      parallels: {},
    }
    pathTracker = new PathTracker(workflow)
    // Minimal hand-rolled execution context (no Executor instance needed here).
    mockContext = {
      workflowId: 'test-workflow',
      blockStates: new Map(),
      blockLogs: [],
      metadata: { duration: 0 },
      environmentVariables: {},
      decisions: { router: new Map(), condition: new Map() },
      loopIterations: new Map(),
      loopItems: new Map(),
      completedLoops: new Set(),
      executedBlocks: new Set(),
      activeExecutionPath: new Set(),
      workflow,
    }
    // Initialize starter as executed and in active path
    mockContext.executedBlocks.add('starter')
    mockContext.activeExecutionPath.add('starter')
    mockContext.activeExecutionPath.add('router-1')
  })

  it('should categorize workflow blocks as flow control blocks requiring active path checks', () => {
    // Verify that workflow blocks now have the correct routing behavior
    expect(Routing.getCategory(BlockType.WORKFLOW)).toBe('flow-control')
    expect(Routing.requiresActivePathCheck(BlockType.WORKFLOW)).toBe(true)
    expect(Routing.shouldSkipInSelectiveActivation(BlockType.WORKFLOW)).toBe(true)
  })

  it('should prevent workflow blocks from executing when not selected by router', () => {
    // This test recreates the exact bug scenario from the CSV data
    // Step 1: Router 1 selects router-2 (not function-1)
    mockContext.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'router-2',
          blockType: BlockType.ROUTER,
          blockTitle: 'Router 2',
        },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('router-1')
    // Update paths after router execution
    pathTracker.updateExecutionPaths(['router-1'], mockContext)
    // Verify router decision
    expect(mockContext.decisions.router.get('router-1')).toBe('router-2')
    // After router-1 execution, router-2 should be active but not function-1
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(true)
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(false)
    // CRITICAL: Workflow block should NOT be activated yet
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    // Step 2: Router 2 selects function-2 (NOT workflow-2)
    mockContext.blockStates.set('router-2', {
      output: {
        selectedPath: {
          blockId: 'function-2',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 2',
        },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('router-2')
    // Update paths after router-2 execution
    pathTracker.updateExecutionPaths(['router-2'], mockContext)
    // Verify router-2 decision
    expect(mockContext.decisions.router.get('router-2')).toBe('function-2')
    // After router-2 execution, function-2 should be active
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(true)
    // CRITICAL: Workflow block should still NOT be activated (this was the bug!)
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    // Step 3: Simulate what the executor's getNextExecutionLayer would do
    // This mimics the logic from executor/index.ts lines 991-994
    const blocksToExecute = workflow.blocks.filter(
      (block) =>
        !mockContext.executedBlocks.has(block.id) &&
        block.enabled !== false &&
        mockContext.activeExecutionPath.has(block.id)
    )
    const blockIds = blocksToExecute.map((b) => b.id)
    // Should only include function-2, NOT workflow-2
    expect(blockIds).toContain('function-2')
    expect(blockIds).not.toContain('workflow-2')
    // Verify that workflow block is not in active path
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    // Verify that isInActivePath also returns false for workflow block
    const isWorkflowActive = pathTracker.isInActivePath('workflow-2', mockContext)
    expect(isWorkflowActive).toBe(false)
  })

  it('should allow workflow blocks to execute when selected by router', () => {
    // Test the positive case - workflow block should execute when actually selected
    // Step 1: Router 1 selects router-2
    mockContext.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'router-2',
          blockType: BlockType.ROUTER,
          blockTitle: 'Router 2',
        },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('router-1')
    pathTracker.updateExecutionPaths(['router-1'], mockContext)
    // Step 2: Router 2 selects workflow-2 (NOT function-2)
    mockContext.blockStates.set('router-2', {
      output: {
        selectedPath: {
          blockId: 'workflow-2',
          blockType: BlockType.WORKFLOW,
          blockTitle: 'Workflow 2',
        },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('router-2')
    pathTracker.updateExecutionPaths(['router-2'], mockContext)
    // Verify router-2 decision
    expect(mockContext.decisions.router.get('router-2')).toBe('workflow-2')
    // After router-2 execution, workflow-2 should be active
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(true)
    // Function-2 should NOT be activated
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
    // Step 3: Verify workflow block would be included in next execution layer
    const blocksToExecute = workflow.blocks.filter(
      (block) =>
        !mockContext.executedBlocks.has(block.id) &&
        block.enabled !== false &&
        mockContext.activeExecutionPath.has(block.id)
    )
    const blockIds = blocksToExecute.map((b) => b.id)
    // Should include workflow-2, NOT function-2
    expect(blockIds).toContain('workflow-2')
    expect(blockIds).not.toContain('function-2')
  })

  it('should handle multiple sequential routers with workflow blocks correctly', () => {
    // This test ensures the fix works with the exact scenario from the bug report:
    // "The issue only seems to happen when there are multiple routing/conditional blocks"
    // Simulate the exact execution order from the CSV:
    // Router 1 → Function 1, Router 2 → Function 2, but Workflow 2 executed anyway
    // Step 1: Router 1 selects function-1 (not router-2)
    mockContext.blockStates.set('router-1', {
      output: {
        selectedPath: {
          blockId: 'function-1',
          blockType: BlockType.FUNCTION,
          blockTitle: 'Function 1',
        },
      },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('router-1')
    pathTracker.updateExecutionPaths(['router-1'], mockContext)
    // After router-1, only function-1 should be active
    expect(mockContext.activeExecutionPath.has('function-1')).toBe(true)
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    // Step 2: Execute function-1
    mockContext.blockStates.set('function-1', {
      output: { result: 'hi', stdout: '' },
      executed: true,
      executionTime: 0,
    })
    mockContext.executedBlocks.add('function-1')
    // Step 3: Check what blocks would be available for next execution
    const blocksToExecute = workflow.blocks.filter(
      (block) =>
        !mockContext.executedBlocks.has(block.id) &&
        block.enabled !== false &&
        mockContext.activeExecutionPath.has(block.id)
    )
    const blockIds = blocksToExecute.map((b) => b.id)
    // CRITICAL: Neither router-2 nor workflow-2 should be eligible for execution
    // because they were not selected by router-1
    expect(blockIds).not.toContain('router-2')
    expect(blockIds).not.toContain('workflow-2')
    expect(blockIds).not.toContain('function-2')
    // Verify none of the unselected blocks are in active path
    expect(mockContext.activeExecutionPath.has('router-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('workflow-2')).toBe(false)
    expect(mockContext.activeExecutionPath.has('function-2')).toBe(false)
  })
})

View File

@@ -1,4 +1,4 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { afterEach, describe, expect, it, vi } from 'vitest'
import {
cn,
convertScheduleOptionsToCron,
@@ -34,9 +34,11 @@ vi.mock('crypto', () => ({
}),
}))
beforeEach(() => {
process.env.ENCRYPTION_KEY = '1234567890abcdef1234567890abcdef1234567890abcdef1234567890abcdef'
})
vi.mock('@/lib/env', () => ({
env: {
ENCRYPTION_KEY: '0123456789abcdef0123456789abcdef0123456789abcdef0123456789abcdef',
},
}))
afterEach(() => {
vi.clearAllMocks()

View File

@@ -400,6 +400,61 @@ export function formatWebhookInput(
}
return body
}
if (foundWebhook.provider === 'microsoftteams') {
// Microsoft Teams outgoing webhook - Teams sending data to us
const messageText = body?.text || ''
const messageId = body?.id || ''
const timestamp = body?.timestamp || body?.localTimestamp || ''
const from = body?.from || {}
const conversation = body?.conversation || {}
return {
input: messageText, // Primary workflow input - the message text
microsoftteams: {
message: {
id: messageId,
text: messageText,
timestamp,
type: body?.type || 'message',
serviceUrl: body?.serviceUrl,
channelId: body?.channelId,
raw: body,
},
from: {
id: from.id,
name: from.name,
aadObjectId: from.aadObjectId,
},
conversation: {
id: conversation.id,
name: conversation.name,
conversationType: conversation.conversationType,
tenantId: conversation.tenantId,
},
activity: {
type: body?.type,
id: body?.id,
timestamp: body?.timestamp,
localTimestamp: body?.localTimestamp,
serviceUrl: body?.serviceUrl,
channelId: body?.channelId,
},
},
webhook: {
data: {
provider: 'microsoftteams',
path: foundWebhook.path,
providerConfig: foundWebhook.providerConfig,
payload: body,
headers: Object.fromEntries(request.headers.entries()),
method: request.method,
},
},
workflowId: foundWorkflow.id,
}
}
// Generic format for Slack and other providers
return {
webhook: {
@@ -790,6 +845,54 @@ export async function executeWorkflowFromPayload(
}
}
/**
 * Validates a Microsoft Teams outgoing webhook request signature using HMAC SHA-256.
 *
 * Teams signs the raw request body with the webhook's shared secret and sends the
 * base64 digest as `Authorization: HMAC <digest>`. We recompute the digest over the
 * exact body bytes and compare in constant time.
 *
 * @param hmacSecret - Microsoft Teams HMAC secret (base64 encoded)
 * @param signature - Authorization header value (should start with 'HMAC ')
 * @param body - Raw request body string (must be the exact bytes Teams signed)
 * @returns Whether the signature is valid
 */
export function validateMicrosoftTeamsSignature(
  hmacSecret: string,
  signature: string,
  body: string
): boolean {
  try {
    // Basic validation first — an empty secret, header, or body can never verify.
    if (!hmacSecret || !signature || !body) {
      return false
    }
    // Check if signature has correct format ('HMAC ' scheme prefix).
    if (!signature.startsWith('HMAC ')) {
      return false
    }
    const providedSignature = signature.substring(5) // Remove 'HMAC ' prefix
    // Compute HMAC SHA256 signature using Node.js crypto.
    const crypto = require('crypto')
    const secretBytes = Buffer.from(hmacSecret, 'base64')
    const bodyBytes = Buffer.from(body, 'utf8')
    const computedHash = crypto.createHmac('sha256', secretBytes).update(bodyBytes).digest('base64')
    // Use the standard library's constant-time comparison instead of a hand-rolled
    // XOR loop. timingSafeEqual throws on length mismatch, so guard that first
    // (a length mismatch is already an invalid signature).
    const computedBuf = Buffer.from(computedHash, 'utf8')
    const providedBuf = Buffer.from(providedSignature, 'utf8')
    if (computedBuf.length !== providedBuf.length) {
      return false
    }
    return crypto.timingSafeEqual(computedBuf, providedBuf)
  } catch (error) {
    // Any decoding/crypto failure is treated as an invalid signature, never thrown.
    console.error('Error validating Microsoft Teams signature:', error)
    return false
  }
}
/**
* Process webhook provider-specific verification
*/
@@ -850,6 +953,10 @@ export function verifyProviderWebhook(
break
}
case 'microsoftteams':
// Microsoft Teams webhook authentication is handled separately in the main flow
// due to the need for raw body access for HMAC verification
break
case 'generic':
// Generic auth logic: requireAuth, token, secretHeaderName, allowedIps
if (providerConfig.requireAuth) {
@@ -1350,10 +1457,10 @@ export async function processWebhook(
return NextResponse.json({ message: 'Airtable webhook processed' }, { status: 200 })
}
// --- Provider-specific Auth/Verification (excluding Airtable/WhatsApp/Slack handled earlier) ---
// --- Provider-specific Auth/Verification (excluding Airtable/WhatsApp/Slack/MicrosoftTeams handled earlier) ---
if (
foundWebhook.provider &&
!['airtable', 'whatsapp', 'slack'].includes(foundWebhook.provider)
!['airtable', 'whatsapp', 'slack', 'microsoftteams'].includes(foundWebhook.provider)
) {
const verificationResponse = verifyProviderWebhook(foundWebhook, request, requestId)
if (verificationResponse) {
@@ -1384,6 +1491,18 @@ export async function processWebhook(
// Since executeWorkflowFromPayload handles logging and errors internally,
// we just need to return a standard success response for synchronous webhooks.
// Note: The actual result isn't typically returned in the webhook response itself.
// For Microsoft Teams outgoing webhooks, return the expected response format
if (foundWebhook.provider === 'microsoftteams') {
return NextResponse.json(
{
type: 'message',
text: 'Webhook processed successfully',
},
{ status: 200 }
)
}
return NextResponse.json({ message: 'Webhook processed' }, { status: 200 })
} catch (error: any) {
// Catch errors *before* calling executeWorkflowFromPayload (e.g., auth errors)
@@ -1391,6 +1510,18 @@ export async function processWebhook(
`[${requestId}] Error in processWebhook *before* execution for ${foundWebhook.id} (Execution: ${executionId})`,
error
)
// For Microsoft Teams outgoing webhooks, return the expected error format
if (foundWebhook.provider === 'microsoftteams') {
return NextResponse.json(
{
type: 'message',
text: 'Webhook processing failed',
},
{ status: 200 }
) // Still return 200 to prevent Teams from showing additional error messages
}
return new NextResponse(`Internal Server Error: ${error.message}`, {
status: 500,
})

View File

@@ -43,7 +43,13 @@ const nextConfig: NextConfig = {
],
outputFileTracingRoot: path.join(__dirname, '../../'),
}),
transpilePackages: ['prettier', '@react-email/components', '@react-email/render'],
transpilePackages: [
'prettier',
'@react-email/components',
'@react-email/render',
'@t3-oss/env-nextjs',
'@t3-oss/env-core',
],
async headers() {
return [
{

View File

@@ -42,7 +42,6 @@ export const ollamaProvider: ProviderConfig = {
},
executeRequest: async (request: ProviderRequest): Promise<ProviderResponse> => {
console.log(request)
logger.info('Preparing Ollama request', {
model: request.model,
hasSystemPrompt: !!request.systemPrompt,

Binary file not shown.

Before

Width:  |  Height:  |  Size: 49 MiB

After

Width:  |  Height:  |  Size: 35 MiB

View File

@@ -72,7 +72,7 @@ function validateVariable(variable: Variable): string | undefined {
return undefined // Valid object
} catch (e) {
console.log('Object parsing error:', e)
logger.error('Object parsing error:', e)
return 'Invalid object syntax'
}
case 'array':

View File

@@ -1,6 +1,7 @@
import type { Edge } from 'reactflow'
import { create } from 'zustand'
import { devtools } from 'zustand/middleware'
import { createLogger } from '@/lib/logs/console-logger'
import { getBlock } from '@/blocks'
import { resolveOutputType } from '@/blocks/utils'
import { pushHistory, type WorkflowStoreWithHistory, withHistory } from '../middleware'
@@ -11,6 +12,8 @@ import { mergeSubblockState } from '../utils'
import type { Position, SubBlockState, SyncControl, WorkflowState } from './types'
import { generateLoopBlocks, generateParallelBlocks } from './utils'
const logger = createLogger('WorkflowStore')
const initialState = {
blocks: {},
edges: [],
@@ -209,11 +212,11 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
updateParentId: (id: string, parentId: string, extent: 'parent') => {
const block = get().blocks[id]
if (!block) {
console.warn(`Cannot set parent: Block ${id} not found`)
logger.warn(`Cannot set parent: Block ${id} not found`)
return
}
console.log('UpdateParentId called:', {
logger.info('UpdateParentId called:', {
blockId: id,
blockName: block.name,
blockType: block.type,
@@ -224,7 +227,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Skip if the parent ID hasn't changed
if (block.data?.parentId === parentId) {
console.log('Parent ID unchanged, skipping update')
logger.info('Parent ID unchanged, skipping update')
return
}
@@ -260,7 +263,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
parallels: { ...get().parallels },
}
console.log('[WorkflowStore/updateParentId] Updated parentId relationship:', {
logger.info('[WorkflowStore/updateParentId] Updated parentId relationship:', {
blockId: id,
newParentId: parentId || 'None (removed parent)',
keepingPosition: absolutePosition,
@@ -306,7 +309,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Start recursive search from the target block
findAllDescendants(id)
console.log('[WorkflowStore/removeBlock] Found blocks to remove:', {
logger.info('Found blocks to remove:', {
targetId: id,
totalBlocksToRemove: Array.from(blocksToRemove),
includesHierarchy: blocksToRemove.size > 1,
@@ -390,7 +393,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
// Validate the edge exists
const edgeToRemove = get().edges.find((edge) => edge.id === edgeId)
if (!edgeToRemove) {
console.warn(`Attempted to remove non-existent edge: ${edgeId}`)
logger.warn(`Attempted to remove non-existent edge: ${edgeId}`)
return
}
@@ -810,7 +813,7 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
console.error('Cannot revert: no active workflow ID')
logger.error('Cannot revert: no active workflow ID')
return
}
@@ -883,13 +886,13 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
if (!response.ok) {
const errorData = await response.json()
console.error('Failed to persist revert to deployed state:', errorData.error)
logger.error('Failed to persist revert to deployed state:', errorData.error)
// Don't throw error to avoid breaking the UI, but log it
} else {
console.log('Successfully persisted revert to deployed state')
logger.info('Successfully persisted revert to deployed state')
}
} catch (error) {
console.error('Error calling revert to deployed API:', error)
logger.error('Error calling revert to deployed API:', error)
// Don't throw error to avoid breaking the UI
}
},

View File

@@ -1,6 +1,9 @@
import { createLogger } from '@/lib/logs/console-logger'
import type { TypeformInsightsParams, TypeformInsightsResponse } from '@/tools/typeform/types'
import type { ToolConfig } from '@/tools/types'
const logger = createLogger('TypeformInsightsTool')
export const insightsTool: ToolConfig<TypeformInsightsParams, TypeformInsightsResponse> = {
id: 'typeform_insights',
name: 'Typeform Insights',
@@ -38,7 +41,7 @@ export const insightsTool: ToolConfig<TypeformInsightsParams, TypeformInsightsRe
try {
const errorData = await response.json()
console.log('Typeform API error response:', JSON.stringify(errorData, null, 2))
logger.info('Typeform API error response:', JSON.stringify(errorData, null, 2))
if (errorData?.message) {
errorMessage = errorData.message
@@ -68,7 +71,7 @@ Details from API: ${errorMessage}${errorDetails}`,
}
} catch (e) {
// If we can't parse the error as JSON, just use the status text
console.log('Error parsing Typeform API error:', e)
logger.error('Error parsing Typeform API error:', e)
}
throw new Error(`Typeform API error (${response.status}): ${errorMessage}${errorDetails}`)

View File

@@ -57,7 +57,7 @@ services:
limits:
memory: 8G
healthcheck:
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002']
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
interval: 90s
timeout: 5s
retries: 3

View File

@@ -55,7 +55,7 @@ services:
db:
condition: service_healthy
healthcheck:
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002']
test: ['CMD', 'wget', '--spider', '--quiet', 'http://127.0.0.1:3002/health']
interval: 90s
timeout: 5s
retries: 3