Compare commits

...

52 Commits

Author SHA1 Message Date
Vikhyath Mondreti
eb07a080fb v0.5.35: helm updates, copilot improvements, 404 for docs, salesforce fixes, subflow resize clamping 2025-12-18 16:23:19 -08:00
Priyanshu Solanki
2a7f51a2f6 adding clamps for subflow drag and drops of blocks (#2460)
Co-authored-by: priyanshu.solanki <priyanshu.solanki@saviynt.com>
2025-12-18 16:57:58 -07:00
Waleed
90c3c43607 fix(blog): add back unoptimized tag, fix styling (#2461) 2025-12-18 15:55:47 -08:00
Siddharth Ganesan
83d813a7cc improvement(copilot): add edge handle validation to copilot edit workflow (#2448)
* Add edge handle validation

* Clean

* Fix lint

* Fix empty target handle
2025-12-18 15:40:00 -08:00
Vikhyath Mondreti
811c736705 fix failing lint from os contributor (#2459) 2025-12-18 15:03:31 -08:00
Vikhyath Mondreti
c6757311af Merge branch 'main' into staging 2025-12-18 14:58:48 -08:00
div
b5b12ba2d1 fix(teams): webhook notifications crash (#2426)
* fix(docs): clarify working directory for drizzle migration (#2375)

* fix(landing): prevent url encoding for spaces for footer links (#2376)

* fix: handle empty body.value in Teams webhook notification parser (#2425)

* Update directory path for migration command

---------

Co-authored-by: Vikhyath Mondreti <vikhyathvikku@gmail.com>
Co-authored-by: Waleed <walif6@gmail.com>
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
Co-authored-by: icecrasher321 <icecrasher321@users.noreply.github.com>
Co-authored-by: Siddharth Ganesan <33737564+Sg312@users.noreply.github.com>
Co-authored-by: mosa <mosaxiv@gmail.com>
Co-authored-by: Emir Karabeg <78010029+emir-karabeg@users.noreply.github.com>
Co-authored-by: Adam Gough <77861281+aadamgough@users.noreply.github.com>
Co-authored-by: Shivam <shivamprajapati035@gmail.com>
Co-authored-by: Gaurav Chadha <65453826+Chadha93@users.noreply.github.com>
Co-authored-by: root <root@Delta.localdomain>
2025-12-18 14:57:27 -08:00
Waleed
0d30676e34 fix(blog): revert back to using next image tags in blog (#2458) 2025-12-18 13:51:58 -08:00
Waleed
36bdccb449 fix(ui): fixed visibility issue on reset password page (#2456) 2025-12-18 13:24:32 -08:00
Waleed
f45730a89e improvement(helm): added SSO and cloud storage variables to helm charts (#2454)
* improvement(helm): added SSO and cloud storage variables to helm charts

* consolidated sf types
2025-12-18 13:12:21 -08:00
Vikhyath Mondreti
04cd837e9c fix(notifs): inactivity polling filters, consolidate trigger types, minor consistency issue with filter parsing (#2452)
* fix(notifs-slack): display name for account

* fix inactivity polling check

* consolidate trigger types

* remove redundant defaults

* fix
2025-12-18 12:49:58 -08:00
Waleed
c23130a26e Revert "fix(salesforce): updated to more flexible oauth that allows production, developer, and custom domain salesforce orgs (#2441) (#2444)" (#2453)
This reverts commit 9da19e84b7.
2025-12-18 12:46:24 -08:00
Priyanshu Solanki
7575cd6f27 Merge pull request #2451 from simstudioai/improvement/SIM-514-useWebhookUrl-conditioning
improvement(useWebhookUrl): GET api/webhook is called when useWebhookUrl:true
2025-12-18 13:31:06 -07:00
priyanshu.solanki
fbde64f0b0 fixing lint errors 2025-12-18 13:04:25 -07:00
Waleed
25f7ed20f6 feat(docs): added 404 page for the docs (#2450)
* feat(docs): added 404 page for the docs

* added metadata
2025-12-18 11:46:42 -08:00
priyanshu.solanki
261aa3d72d fixing a react component: 2025-12-18 12:39:47 -07:00
Waleed
9da19e84b7 fix(salesforce): updated to more flexible oauth that allows production, developer, and custom domain salesforce orgs (#2441) (#2444)
* fix(oauth): updated oauth providers that had unstable reference IDs leading to duplicate oauth records (#2441)

* fix(oauth): updated oauth providers that had unstable reference IDs leading to duplicate oauth records

* ack PR comments

* ack PR comments

* cleanup salesforce refresh logic

* ack more PR comments
2025-12-18 11:39:28 -08:00
priyanshu.solanki
e83afc0a62 fixing the useWebhookManagement call to only call the loadWebhookOrGenerateUrl function when the useWebhookUrl flag is true 2025-12-18 12:31:18 -07:00
Vikhyath Mondreti
1720fa8749 feat(compare-schema): ci check to make sure schema.ts never goes out of sync with migrations (#2449)
* feat(compare-schema): ci check to make sure schema.ts never goes out of sync with migrations

* test out of sync [do not merge]

* Revert "test out of sync [do not merge]"

This reverts commit 9771f66b84.
2025-12-18 11:25:19 -08:00
Waleed
f3ad7750af fix(auth): added same-origin validation to forget password route, added confirmation for disable auth FF (#2447)
* fix(auth): added same-origin validation to forget password route, added confirmation for disable auth FF

* ack PR comments
2025-12-18 11:07:25 -08:00
Vikhyath Mondreti
78b7643e65 fix(condition): async execution isolated vm error (#2446)
* fix(condition): async execution isolated vm error

* fix tests
2025-12-18 11:02:01 -08:00
Siddharth Ganesan
7ef1150383 fix(workflow-state, copilot): prevent copilot from setting undefined state, fix order of operations for copilot edit workflow, add sleep tool (#2440)
* Fix copilot ooo

* Add copilot sleep tool

* Fix lint
2025-12-18 09:57:01 -08:00
Waleed
67cfb21d08 v0.5.34: servicenow, code cleanup, prevent cyclic edge connections, custom tool fixes 2025-12-17 23:39:10 -08:00
Waleed
a337af92bc fix(custom-tools): added missing _toolSchema to internal param set for agents calling custom tools (#2445) 2025-12-17 23:38:36 -08:00
Waleed
b4a99779eb feat(i18n): update translations (#2443)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-17 20:55:06 -08:00
Waleed
471cb4747c fix(oauth): updated oauth providers that had unstable reference IDs leading to duplicate oauth records (#2441)
* fix(oauth): updated oauth providers that had unstable reference IDs leading to duplicate oauth records

* ack PR comments
2025-12-17 20:45:38 -08:00
Priyanshu Solanki
491bd783b5 fix(servicenow): update servicenow block to use basic auth instead of oauth (#2435)
* fix adding client ID and secret fields to support oauth

* revert servicenow to use basic auth instead of oauth

* fix failing tests

---------

Co-authored-by: priyanshu.solanki <priyanshu.solanki@saviynt.com>
Co-authored-by: waleed <walif6@gmail.com>
2025-12-17 20:41:46 -08:00
Waleed
5516fa39c3 fix(graph): prevent cyclic dependencies in graph following ReactFlow examples (#2439)
* fix(graph): prevent cyclic dependencies in graph following ReactFlow examples

* ack PR comment
2025-12-17 19:13:39 -08:00
Waleed
21fa92bc41 feat(i18n): update translations (#2438)
Co-authored-by: waleedlatif1 <waleedlatif1@users.noreply.github.com>
2025-12-17 18:01:43 -08:00
Vikhyath Mondreti
26ca37328a fix(conditions): make outputs correct (#2437) 2025-12-17 17:15:16 -08:00
Waleed
731997f768 fix(envvars): cleanup unused envvars (#2436)
* fix(envvars): cleanup unused envvars

* removed unused react-google-drive-picker dep
2025-12-17 17:13:01 -08:00
Waleed
1d6975db49 v0.5.33: loops, chat fixes, subflow resizing refactor, terminal updates 2025-12-17 15:45:39 -08:00
Waleed
c4a6d11cc0 fix(condition): used isolated vms for condition block RCE (#2432)
* fix(condition): used isolated vms for condition block RCE

* ack PR comment

* one more

* remove inputForm from sched, update loop condition to also use isolated vm

* hide servicenow
2025-12-17 15:29:25 -08:00
Waleed
7b5405e968 feat(vertex): added vertex to list of supported providers (#2430)
* feat(vertex): added vertex to list of supported providers

* added utils files for each provider, consolidated gemini utils, added dynamic verbosity and reasoning fetcher
2025-12-17 14:57:58 -08:00
Vikhyath Mondreti
1ae3b47f5c fix(inactivity-poll): need to respect level and trigger filters (#2431) 2025-12-17 14:50:33 -08:00
Waleed
3120a785df fix(terminal): fix text wrap for errors and messages with long strings (#2429) 2025-12-17 13:42:43 -08:00
Vikhyath Mondreti
8775e76c32 improvement(subflow): resize vertical height estimate (#2428)
* improvement(node-dims): share constants for node padding

* fix vertical height estimation
2025-12-17 12:07:57 -08:00
Vikhyath Mondreti
9a6c68789d fix(subflow): resizing live update 2025-12-17 11:49:24 -08:00
Waleed
08bc1125bd fix(cmd-k): when navigating to current workspace/workflow, close modal instead of navigating (#2420)
* fix(cmd-k): when navigating to current workspace, close modal instead of navigating

* ack PR comment
2025-12-17 10:21:35 -08:00
Waleed
f4f74da1dc feat(i18n): update translations (#2421)
Co-authored-by: icecrasher321 <icecrasher321@users.noreply.github.com>
2025-12-17 10:21:15 -08:00
Vikhyath Mondreti
de330d80f5 improvement(mcp): restructure mcp tools caching/fetching info to improve UX (#2416)
* feat(mcp): improve cache practice

* restructure mcps fetching, caching, UX indicators

* fix schema

* styling improvements

* fix tooltips and render issue

* fix loading sequence + add redis

---------

Co-authored-by: waleed <walif6@gmail.com>
2025-12-16 21:23:18 -08:00
Emir Karabeg
b7228d57f7 feat(service-now): added service now block (#2404)
* feat(service-now): added service now block

* fix: bun lock

* improvement: fixed @trigger.dev/sdk imports and removal of sentry blocks

* improvement: fixed @trigger.dev/sdk import

* improvement: fixed @trigger.dev/sdk import

* fix(servicenow): save accessTokenExpiresAt on initial OAuth account creation

* docs(servicenow): add ServiceNow tool documentation and icon mapping

* fixing bun lint issues

* fixing username/password fields

* fixing test file for refreshaccesstoken to support instance uri

* removing basic auth and fixing undo-redo/store.ts

* removed import set api code, changed CRUD operations to CRUD_record and added wand configuration to help users to generate JSON Arrays

---------

Co-authored-by: priyanshu.solanki <priyanshu.solanki@saviynt.com>
2025-12-16 21:16:09 -08:00
Waleed
dcbeca1abe fix(subflow): fix json stringification in subflow collections (#2419)
* fix(subflow): fix json stringification in subflow collections

* cleanup
2025-12-16 20:47:58 -08:00
Waleed
27ea333974 fix(chat): fix stale closure in workflow runner for chat (#2418) 2025-12-16 19:59:02 -08:00
Waleed
9861d3a0ac improvement(helm): added more to helm charts, remove instance selector for various cloud providers (#2412)
* improvement(helm): added more to helm charts, remove instance selector for various cloud providers

* ack PR comment
2025-12-16 18:24:00 -08:00
Waleed
fdbf8be79b fix(logs-search): restored support for log search queries (#2417) 2025-12-16 18:18:46 -08:00
Adam Gough
6f4f4e22f0 fix(loop): increased max loop iterations to 1000 (#2413) 2025-12-16 16:08:56 -08:00
Waleed
837aabca5e v0.5.32: google sheets fix, schedule input format 2025-12-16 15:41:04 -08:00
Vikhyath Mondreti
f7d2c9667f fix(serializer): condition check should check if any condition are met (#2410)
* fix(serializer): condition check should check if any condition are met

* remove comments

* remove more comments
2025-12-16 14:36:40 -08:00
Waleed
29befbc5f6 feat(schedule): add input form to schedule (#2405)
* feat(schedule): add input form to schedule

* change placeholder
2025-12-16 11:23:57 -08:00
Vikhyath Mondreti
f9cfca92bf v0.5.31: add zod as direct dep 2025-12-15 20:40:02 -08:00
Vikhyath Mondreti
9cf8aaee1b fix(sockets): add zod as direct sockets server dep (#2397)
* fix(sockets): add zod as direct sockets server dep

* fix bun lock
2025-12-15 20:25:40 -08:00
240 changed files with 16628 additions and 3393 deletions

View File

@@ -48,6 +48,19 @@ jobs:
ENCRYPTION_KEY: '7cf672e460e430c1fba707575c2b0e2ad5a99dddf9b7b7e3b5646e630861db1c' # dummy key for CI only
run: bun run test
- name: Check schema and migrations are in sync
working-directory: packages/db
run: |
bunx drizzle-kit generate --config=./drizzle.config.ts
if [ -n "$(git status --porcelain ./migrations)" ]; then
echo "❌ Schema and migrations are out of sync!"
echo "Run 'cd packages/db && bunx drizzle-kit generate' and commit the new migrations."
git status --porcelain ./migrations
git diff ./migrations
exit 1
fi
echo "✅ Schema and migrations are in sync"
- name: Build application
env:
NODE_OPTIONS: '--no-warnings'

View File

@@ -188,7 +188,7 @@ DATABASE_URL="postgresql://postgres:your_password@localhost:5432/simstudio"
Then run the migrations:
```bash
cd apps/sim # Required so drizzle picks correct .env file
cd packages/db # Required so drizzle picks correct .env file
bunx drizzle-kit migrate --config=./drizzle.config.ts
```

View File

@@ -0,0 +1,23 @@
import { DocsBody, DocsPage } from 'fumadocs-ui/page'
export const metadata = {
title: 'Page Not Found',
}
export default function NotFound() {
return (
<DocsPage>
<DocsBody>
<div className='flex min-h-[60vh] flex-col items-center justify-center text-center'>
<h1 className='mb-4 bg-gradient-to-b from-[#8357FF] to-[#6F3DFA] bg-clip-text font-bold text-8xl text-transparent'>
404
</h1>
<h2 className='mb-2 font-semibold text-2xl text-foreground'>Page Not Found</h2>
<p className='text-muted-foreground'>
The page you're looking for doesn't exist or has been moved.
</p>
</div>
</DocsBody>
</DocsPage>
)
}

View File

@@ -2452,6 +2452,56 @@ export const GeminiIcon = (props: SVGProps<SVGSVGElement>) => (
</svg>
)
export const VertexIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
id='standard_product_icon'
xmlns='http://www.w3.org/2000/svg'
version='1.1'
viewBox='0 0 512 512'
>
<g id='bounding_box'>
<rect width='512' height='512' fill='none' />
</g>
<g id='art'>
<path
d='M128,244.99c-8.84,0-16-7.16-16-16v-95.97c0-8.84,7.16-16,16-16s16,7.16,16,16v95.97c0,8.84-7.16,16-16,16Z'
fill='#ea4335'
/>
<path
d='M256,458c-2.98,0-5.97-.83-8.59-2.5l-186-122c-7.46-4.74-9.65-14.63-4.91-22.09,4.75-7.46,14.64-9.65,22.09-4.91l177.41,116.53,177.41-116.53c7.45-4.74,17.34-2.55,22.09,4.91,4.74,7.46,2.55,17.34-4.91,22.09l-186,122c-2.62,1.67-5.61,2.5-8.59,2.5Z'
fill='#fbbc04'
/>
<path
d='M256,388.03c-8.84,0-16-7.16-16-16v-73.06c0-8.84,7.16-16,16-16s16,7.16,16,16v73.06c0,8.84-7.16,16-16,16Z'
fill='#34a853'
/>
<circle cx='128' cy='70' r='16' fill='#ea4335' />
<circle cx='128' cy='292' r='16' fill='#ea4335' />
<path
d='M384.23,308.01c-8.82,0-15.98-7.14-16-15.97l-.23-94.01c-.02-8.84,7.13-16.02,15.97-16.03h.04c8.82,0,15.98,7.14,16,15.97l.23,94.01c.02,8.84-7.13,16.02-15.97,16.03h-.04Z'
fill='#4285f4'
/>
<circle cx='384' cy='70' r='16' fill='#4285f4' />
<circle cx='384' cy='134' r='16' fill='#4285f4' />
<path
d='M320,220.36c-8.84,0-16-7.16-16-16v-103.02c0-8.84,7.16-16,16-16s16,7.16,16,16v103.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='256' cy='171' r='16' fill='#34a853' />
<circle cx='256' cy='235' r='16' fill='#34a853' />
<circle cx='320' cy='265' r='16' fill='#fbbc04' />
<circle cx='320' cy='329' r='16' fill='#fbbc04' />
<path
d='M192,217.36c-8.84,0-16-7.16-16-16v-100.02c0-8.84,7.16-16,16-16s16,7.16,16,16v100.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='192' cy='265' r='16' fill='#fbbc04' />
<circle cx='192' cy='329' r='16' fill='#fbbc04' />
</g>
</svg>
)
export const CerebrasIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
@@ -3335,6 +3385,21 @@ export function SalesforceIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function ServiceNowIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 71.1 63.6'>
<path
fillRule='evenodd'
clipRule='evenodd'
fill='#62D84E'
d='M35.8,0C16.1,0,0,15.9,0,35.6c0,9.8,4,19.3,11.2,26c2.5,2.4,6.4,2.6,9.2,0.5c9-6.7,21.4-6.7,30.4,0
c2.8,2.1,6.7,1.9,9.2-0.5C74.3,48,74.9,25.4,61.3,11.1C54.7,4.1,45.4,0.1,35.8,0 M35.6,53.5C26,53.8,18,46.2,17.8,36.7
c0-0.3,0-0.6,0-0.9c0-9.8,8-17.8,17.8-17.8s17.8,8,17.8,17.8c0.3,9.6-7.3,17.5-16.8,17.8C36.2,53.5,35.9,53.5,35.6,53.5'
/>
</svg>
)
}
export function ApolloIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg

View File

@@ -85,6 +85,7 @@ import {
SendgridIcon,
SentryIcon,
SerperIcon,
ServiceNowIcon,
SftpIcon,
ShopifyIcon,
SlackIcon,
@@ -119,116 +120,117 @@ import {
type IconComponent = ComponentType<SVGProps<SVGSVGElement>>
export const blockTypeToIconMap: Record<string, IconComponent> = {
calendly: CalendlyIcon,
mailchimp: MailchimpIcon,
postgresql: PostgresIcon,
twilio_voice: TwilioIcon,
elasticsearch: ElasticsearchIcon,
rds: RDSIcon,
translate: TranslateIcon,
dynamodb: DynamoDBIcon,
wordpress: WordpressIcon,
tavily: TavilyIcon,
zoom: ZoomIcon,
zep: ZepIcon,
zendesk: ZendeskIcon,
youtube: YouTubeIcon,
supabase: SupabaseIcon,
vision: EyeIcon,
zoom: ZoomIcon,
confluence: ConfluenceIcon,
arxiv: ArxivIcon,
webflow: WebflowIcon,
pinecone: PineconeIcon,
apollo: ApolloIcon,
whatsapp: WhatsAppIcon,
typeform: TypeformIcon,
qdrant: QdrantIcon,
shopify: ShopifyIcon,
asana: AsanaIcon,
sqs: SQSIcon,
apify: ApifyIcon,
memory: BrainIcon,
gitlab: GitLabIcon,
polymarket: PolymarketIcon,
serper: SerperIcon,
linear: LinearIcon,
exa: ExaAIIcon,
telegram: TelegramIcon,
salesforce: SalesforceIcon,
hubspot: HubspotIcon,
hunter: HunterIOIcon,
linkup: LinkupIcon,
mongodb: MongoDBIcon,
airtable: AirtableIcon,
discord: DiscordIcon,
ahrefs: AhrefsIcon,
neo4j: Neo4jIcon,
tts: TTSIcon,
jina: JinaAIIcon,
google_docs: GoogleDocsIcon,
perplexity: PerplexityIcon,
google_search: GoogleIcon,
x: xIcon,
kalshi: KalshiIcon,
google_calendar: GoogleCalendarIcon,
zep: ZepIcon,
posthog: PosthogIcon,
grafana: GrafanaIcon,
google_slides: GoogleSlidesIcon,
microsoft_planner: MicrosoftPlannerIcon,
thinking: BrainIcon,
pipedrive: PipedriveIcon,
dropbox: DropboxIcon,
stagehand: StagehandIcon,
google_forms: GoogleFormsIcon,
file: DocumentIcon,
mistral_parse: MistralIcon,
gmail: GmailIcon,
openai: OpenAIIcon,
outlook: OutlookIcon,
incidentio: IncidentioIcon,
onedrive: MicrosoftOneDriveIcon,
resend: ResendIcon,
google_vault: GoogleVaultIcon,
sharepoint: MicrosoftSharepointIcon,
huggingface: HuggingFaceIcon,
sendgrid: SendgridIcon,
video_generator: VideoIcon,
smtp: SmtpIcon,
google_groups: GoogleGroupsIcon,
mailgun: MailgunIcon,
clay: ClayIcon,
jira: JiraIcon,
search: SearchIcon,
linkedin: LinkedInIcon,
wealthbox: WealthboxIcon,
notion: NotionIcon,
elevenlabs: ElevenLabsIcon,
microsoft_teams: MicrosoftTeamsIcon,
github: GithubIcon,
sftp: SftpIcon,
ssh: SshIcon,
google_drive: GoogleDriveIcon,
sentry: SentryIcon,
reddit: RedditIcon,
parallel_ai: ParallelIcon,
spotify: SpotifyIcon,
stripe: StripeIcon,
s3: S3Icon,
trello: TrelloIcon,
mem0: Mem0Icon,
knowledge: PackageSearchIcon,
intercom: IntercomIcon,
twilio_sms: TwilioIcon,
duckduckgo: DuckDuckGoIcon,
slack: SlackIcon,
datadog: DatadogIcon,
microsoft_excel: MicrosoftExcelIcon,
image_generator: ImageIcon,
google_sheets: GoogleSheetsIcon,
wordpress: WordpressIcon,
wikipedia: WikipediaIcon,
cursor: CursorIcon,
firecrawl: FirecrawlIcon,
mysql: MySQLIcon,
browser_use: BrowserUseIcon,
whatsapp: WhatsAppIcon,
webflow: WebflowIcon,
wealthbox: WealthboxIcon,
vision: EyeIcon,
video_generator: VideoIcon,
typeform: TypeformIcon,
twilio_voice: TwilioIcon,
twilio_sms: TwilioIcon,
tts: TTSIcon,
trello: TrelloIcon,
translate: TranslateIcon,
thinking: BrainIcon,
telegram: TelegramIcon,
tavily: TavilyIcon,
supabase: SupabaseIcon,
stt: STTIcon,
stripe: StripeIcon,
stagehand: StagehandIcon,
ssh: SshIcon,
sqs: SQSIcon,
spotify: SpotifyIcon,
smtp: SmtpIcon,
slack: SlackIcon,
shopify: ShopifyIcon,
sharepoint: MicrosoftSharepointIcon,
sftp: SftpIcon,
servicenow: ServiceNowIcon,
serper: SerperIcon,
sentry: SentryIcon,
sendgrid: SendgridIcon,
search: SearchIcon,
salesforce: SalesforceIcon,
s3: S3Icon,
resend: ResendIcon,
reddit: RedditIcon,
rds: RDSIcon,
qdrant: QdrantIcon,
posthog: PosthogIcon,
postgresql: PostgresIcon,
polymarket: PolymarketIcon,
pipedrive: PipedriveIcon,
pinecone: PineconeIcon,
perplexity: PerplexityIcon,
parallel_ai: ParallelIcon,
outlook: OutlookIcon,
openai: OpenAIIcon,
onedrive: MicrosoftOneDriveIcon,
notion: NotionIcon,
neo4j: Neo4jIcon,
mysql: MySQLIcon,
mongodb: MongoDBIcon,
mistral_parse: MistralIcon,
microsoft_teams: MicrosoftTeamsIcon,
microsoft_planner: MicrosoftPlannerIcon,
microsoft_excel: MicrosoftExcelIcon,
memory: BrainIcon,
mem0: Mem0Icon,
mailgun: MailgunIcon,
mailchimp: MailchimpIcon,
linkup: LinkupIcon,
linkedin: LinkedInIcon,
linear: LinearIcon,
knowledge: PackageSearchIcon,
kalshi: KalshiIcon,
jira: JiraIcon,
jina: JinaAIIcon,
intercom: IntercomIcon,
incidentio: IncidentioIcon,
image_generator: ImageIcon,
hunter: HunterIOIcon,
huggingface: HuggingFaceIcon,
hubspot: HubspotIcon,
grafana: GrafanaIcon,
google_vault: GoogleVaultIcon,
google_slides: GoogleSlidesIcon,
google_sheets: GoogleSheetsIcon,
google_groups: GoogleGroupsIcon,
google_forms: GoogleFormsIcon,
google_drive: GoogleDriveIcon,
google_docs: GoogleDocsIcon,
google_calendar: GoogleCalendarIcon,
google_search: GoogleIcon,
gmail: GmailIcon,
gitlab: GitLabIcon,
github: GithubIcon,
firecrawl: FirecrawlIcon,
file: DocumentIcon,
exa: ExaAIIcon,
elevenlabs: ElevenLabsIcon,
elasticsearch: ElasticsearchIcon,
dynamodb: DynamoDBIcon,
duckduckgo: DuckDuckGoIcon,
dropbox: DropboxIcon,
discord: DiscordIcon,
datadog: DatadogIcon,
cursor: CursorIcon,
confluence: ConfluenceIcon,
clay: ClayIcon,
calendly: CalendlyIcon,
browser_use: BrowserUseIcon,
asana: AsanaIcon,
arxiv: ArxivIcon,
apollo: ApolloIcon,
apify: ApifyIcon,
airtable: AirtableIcon,
ahrefs: AhrefsIcon,
}

View File

@@ -111,26 +111,24 @@ Verschiedene Blocktypen erzeugen unterschiedliche Ausgabestrukturen. Hier ist, w
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### Ausgabefelder des Condition-Blocks
- **content**: Der ursprüngliche, durchgeleitete Inhalt
- **conditionResult**: Boolesches Ergebnis der Bedingungsauswertung
- **selectedPath**: Informationen über den ausgewählten Pfad
- **blockId**: ID des nächsten Blocks im ausgewählten Pfad
- **blockType**: Typ des nächsten Blocks
- **blockTitle**: Titel des nächsten Blocks
- **selectedConditionId**: ID der ausgewählten Bedingung
- **selectedOption**: ID der ausgewählten Bedingung
</Tab>
<Tab>

View File

@@ -0,0 +1,124 @@
---
title: ServiceNow
description: ServiceNow-Datensätze erstellen, lesen, aktualisieren und löschen
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/) ist eine leistungsstarke Cloud-Plattform zur Optimierung und Automatisierung von IT-Service-Management (ITSM), Workflows und Geschäftsprozessen in Ihrem Unternehmen. ServiceNow ermöglicht Ihnen die Verwaltung von Vorfällen, Anfragen, Aufgaben, Benutzern und mehr über seine umfangreiche API.
Mit ServiceNow können Sie:
- **IT-Workflows automatisieren**: Datensätze in jeder ServiceNow-Tabelle erstellen, lesen, aktualisieren und löschen, z. B. Vorfälle, Aufgaben, Änderungsanfragen und Benutzer.
- **Systeme integrieren**: ServiceNow mit Ihren anderen Tools und Prozessen für nahtlose Automatisierung verbinden.
- **Eine einzige Informationsquelle pflegen**: Alle Ihre Service- und Betriebsdaten organisiert und zugänglich halten.
- **Betriebliche Effizienz steigern**: Manuelle Arbeit reduzieren und Servicequalität mit anpassbaren Workflows und Automatisierung verbessern.
In Sim ermöglicht die ServiceNow-Integration Ihren Agenten, direkt mit Ihrer ServiceNow-Instanz als Teil ihrer Workflows zu interagieren. Agenten können Datensätze in jeder ServiceNow-Tabelle erstellen, lesen, aktualisieren oder löschen und Ticket- oder Benutzerdaten für ausgefeilte Automatisierung und Entscheidungsfindung nutzen. Diese Integration verbindet Ihre Workflow-Automatisierung und IT-Betrieb und befähigt Ihre Agenten, Serviceanfragen, Vorfälle, Benutzer und Assets ohne manuelle Eingriffe zu verwalten. Durch die Verbindung von Sim mit ServiceNow können Sie Service-Management-Aufgaben automatisieren, Reaktionszeiten verbessern und konsistenten, sicheren Zugriff auf die wichtigen Servicedaten Ihres Unternehmens gewährleisten.
{/* MANUAL-CONTENT-END */}
## Nutzungsanweisungen
Integrieren Sie ServiceNow in Ihren Workflow. Erstellen, lesen, aktualisieren und löschen Sie Datensätze in jeder ServiceNow-Tabelle, einschließlich Vorfälle, Aufgaben, Änderungsanfragen, Benutzer und mehr.
## Tools
### `servicenow_create_record`
Einen neuen Datensatz in einer ServiceNow-Tabelle erstellen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) |
| `username` | string | Ja | ServiceNow-Benutzername |
| `password` | string | Ja | ServiceNow-Passwort |
| `tableName` | string | Ja | Tabellenname \(z. B. incident, task, sys_user\) |
| `fields` | json | Ja | Felder, die für den Datensatz festgelegt werden sollen \(JSON-Objekt\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `record` | json | Erstellter ServiceNow-Datensatz mit sys_id und anderen Feldern |
| `metadata` | json | Metadaten der Operation |
### `servicenow_read_record`
Datensätze aus einer ServiceNow-Tabelle lesen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) |
| `username` | string | Ja | ServiceNow-Benutzername |
| `password` | string | Ja | ServiceNow-Passwort |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Nein | Spezifische Datensatz-sys_id |
| `number` | string | Nein | Datensatznummer \(z. B. INC0010001\) |
| `query` | string | Nein | Kodierte Abfragezeichenfolge \(z. B. "active=true^priority=1"\) |
| `limit` | number | Nein | Maximale Anzahl der zurückzugebenden Datensätze |
| `fields` | string | Nein | Durch Kommas getrennte Liste der zurückzugebenden Felder |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `records` | array | Array von ServiceNow-Datensätzen |
| `metadata` | json | Metadaten der Operation |
### `servicenow_update_record`
Einen bestehenden Datensatz in einer ServiceNow-Tabelle aktualisieren
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) |
| `username` | string | Ja | ServiceNow-Benutzername |
| `password` | string | Ja | ServiceNow-Passwort |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Ja | Datensatz-sys_id zum Aktualisieren |
| `fields` | json | Ja | Zu aktualisierende Felder \(JSON-Objekt\) |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `record` | json | Aktualisierter ServiceNow-Datensatz |
| `metadata` | json | Metadaten der Operation |
### `servicenow_delete_record`
Einen Datensatz aus einer ServiceNow-Tabelle löschen
#### Eingabe
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Ja | ServiceNow-Instanz-URL \(z. B. https://instance.service-now.com\) |
| `username` | string | Ja | ServiceNow-Benutzername |
| `password` | string | Ja | ServiceNow-Passwort |
| `tableName` | string | Ja | Tabellenname |
| `sysId` | string | Ja | Datensatz-sys_id zum Löschen |
#### Ausgabe
| Parameter | Typ | Beschreibung |
| --------- | ---- | ----------- |
| `success` | boolean | Ob das Löschen erfolgreich war |
| `metadata` | json | Metadaten der Operation |
## Hinweise
- Kategorie: `tools`
- Typ: `servicenow`

View File

@@ -39,14 +39,16 @@ Senden Sie eine Chat-Completion-Anfrage an jeden unterstützten LLM-Anbieter
| Parameter | Typ | Erforderlich | Beschreibung |
| --------- | ---- | -------- | ----------- |
| `model` | string | Ja | Das zu verwendende Modell (z.B. gpt-4o, claude-sonnet-4-5, gemini-2.0-flash) |
| `systemPrompt` | string | Nein | System-Prompt zur Festlegung des Assistentenverhaltens |
| `context` | string | Ja | Die Benutzernachricht oder der Kontext, der an das Modell gesendet wird |
| `apiKey` | string | Nein | API-Schlüssel für den Anbieter (verwendet den Plattformschlüssel, wenn für gehostete Modelle nicht angegeben) |
| `temperature` | number | Nein | Temperatur für die Antwortgenerierung (0-2) |
| `maxTokens` | number | Nein | Maximale Tokens in der Antwort |
| `model` | string | Ja | Das zu verwendende Modell \(z. B. gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | Nein | System-Prompt zur Festlegung des Verhaltens des Assistenten |
| `context` | string | Ja | Die Benutzernachricht oder der Kontext, der an das Modell gesendet werden soll |
| `apiKey` | string | Nein | API-Schlüssel für den Anbieter \(verwendet Plattform-Schlüssel, falls nicht für gehostete Modelle angegeben\) |
| `temperature` | number | Nein | Temperatur für die Antwortgenerierung \(0-2\) |
| `maxTokens` | number | Nein | Maximale Anzahl von Tokens in der Antwort |
| `azureEndpoint` | string | Nein | Azure OpenAI-Endpunkt-URL |
| `azureApiVersion` | string | Nein | Azure OpenAI API-Version |
| `azureApiVersion` | string | Nein | Azure OpenAI-API-Version |
| `vertexProject` | string | Nein | Google Cloud-Projekt-ID für Vertex AI |
| `vertexLocation` | string | Nein | Google Cloud-Standort für Vertex AI \(Standard: us-central1\) |
#### Ausgabe

View File

@@ -106,26 +106,24 @@ Different block types produce different output structures. Here's what you can e
<Tab>
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### Condition Block Output Fields
- **content**: The original content passed through
- **conditionResult**: Boolean result of the condition evaluation
- **selectedPath**: Information about the selected path
- **blockId**: ID of the next block in the selected path
- **blockType**: Type of the next block
- **blockTitle**: Title of the next block
- **selectedConditionId**: ID of the selected condition
- **selectedOption**: ID of the selected condition
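Putting these fields together, a hedged TypeScript sketch of the output shape after this change (derived only from the JSON example above; the interface name is illustrative, not a Sim export):
```typescript
// Hedged sketch of the condition block output, derived from the JSON example above.
// The interface name is illustrative and not part of Sim's exported types.
interface ConditionBlockOutput {
  content: string // the original content passed through
  conditionResult: boolean // boolean result of the condition evaluation
  selectedPath: {
    blockId: string // ID of the next block in the selected path
    blockType: string // type of the next block
    blockTitle: string // title of the next block
  }
  selectedOption: string // ID of the selected condition (previously selectedConditionId)
}
```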
</Tab>
<Tab>

View File

@@ -80,6 +80,7 @@
"sendgrid",
"sentry",
"serper",
"servicenow",
"sftp",
"sharepoint",
"shopify",

View File

@@ -0,0 +1,129 @@
---
title: ServiceNow
description: Create, read, update, and delete ServiceNow records
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/) is a powerful cloud platform designed to streamline and automate IT service management (ITSM), workflows, and business processes across your organization. ServiceNow enables you to manage incidents, requests, tasks, users, and more using its extensive API.
With ServiceNow, you can:
- **Automate IT workflows**: Create, read, update, and delete records in any ServiceNow table, such as incidents, tasks, change requests, and users.
- **Integrate systems**: Connect ServiceNow with your other tools and processes for seamless automation.
- **Maintain a single source of truth**: Keep all your service and operations data organized and accessible.
- **Drive operational efficiency**: Reduce manual work and improve service quality with customizable workflows and automation.
In Sim, the ServiceNow integration enables your agents to interact directly with your ServiceNow instance as part of their workflows. Agents can create, read, update, or delete records in any ServiceNow table and leverage ticket or user data for sophisticated automation and decision-making. This integration bridges your workflow automation and IT operations, empowering your agents to manage service requests, incidents, users, and assets without manual intervention. By connecting Sim with ServiceNow, you can automate service management tasks, improve response times, and ensure consistent, secure access to your organization's vital service data.
{/* MANUAL-CONTENT-END */}
## Usage Instructions
Integrate ServiceNow into your workflow. Create, read, update, and delete records in any ServiceNow table including incidents, tasks, change requests, users, and more.
## Tools
### `servicenow_create_record`
Create a new record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name \(e.g., incident, task, sys_user\) |
| `fields` | json | Yes | Fields to set on the record \(JSON object\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Created ServiceNow record with sys_id and other fields |
| `metadata` | json | Operation metadata |
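For orientation, a hedged TypeScript sketch of how a call with these inputs could be issued against the ServiceNow Table API (`POST /api/now/table/{tableName}` with basic auth). The helper name and error handling are assumptions; only the documented parameters above come from this block:
```typescript
// Minimal sketch, assuming the standard ServiceNow Table API and basic auth.
// The helper name is hypothetical; parameter names mirror the input table above.
async function createServiceNowRecord(
  instanceUrl: string,
  username: string,
  password: string,
  tableName: string,
  fields: Record<string, unknown>
): Promise<Record<string, unknown>> {
  const auth = Buffer.from(`${username}:${password}`).toString('base64')
  const res = await fetch(`${instanceUrl}/api/now/table/${tableName}`, {
    method: 'POST',
    headers: {
      Authorization: `Basic ${auth}`,
      'Content-Type': 'application/json',
      Accept: 'application/json',
    },
    body: JSON.stringify(fields),
  })
  if (!res.ok) {
    throw new Error(`ServiceNow create failed with status ${res.status}`)
  }
  // The Table API wraps the created record (including sys_id) in a `result` field.
  const data = (await res.json()) as { result: Record<string, unknown> }
  return data.result
}
```
The read, update, and delete tools described below map onto the same endpoint family (`GET`, `PATCH`, and `DELETE` on `/api/now/table/{tableName}/{sys_id}`), so the same basic-auth header applies.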
### `servicenow_read_record`
Read records from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | No | Specific record sys_id |
| `number` | string | No | Record number \(e.g., INC0010001\) |
| `query` | string | No | Encoded query string \(e.g., "active=true^priority=1"\) |
| `limit` | number | No | Maximum number of records to return |
| `fields` | string | No | Comma-separated list of fields to return |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `records` | array | Array of ServiceNow records |
| `metadata` | json | Operation metadata |
### `servicenow_update_record`
Update an existing record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | Record sys_id to update |
| `fields` | json | Yes | Fields to update \(JSON object\) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Updated ServiceNow record |
| `metadata` | json | Operation metadata |
### `servicenow_delete_record`
Delete a record from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL \(e.g., https://instance.service-now.com\) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | Record sys_id to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the deletion was successful |
| `metadata` | json | Operation metadata |
## Notes
- Category: `tools`
- Type: `servicenow`

View File

@@ -50,6 +50,8 @@ Send a chat completion request to any supported LLM provider
| `maxTokens` | number | No | Maximum tokens in the response |
| `azureEndpoint` | string | No | Azure OpenAI endpoint URL |
| `azureApiVersion` | string | No | Azure OpenAI API version |
| `vertexProject` | string | No | Google Cloud project ID for Vertex AI |
| `vertexLocation` | string | No | Google Cloud location for Vertex AI \(defaults to us-central1\) |
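As a hedged illustration of the two new inputs, a request object might carry them alongside the existing parameters (the payload shape and values below are assumptions; only the parameter names come from the table above):
```typescript
// Illustrative only: parameter names match the input table above, but the
// surrounding payload shape and example values are assumptions.
const chatCompletionParams = {
  model: 'gemini-2.0-flash',
  context: 'Summarize the latest incident report.',
  vertexProject: 'my-gcp-project', // hypothetical Google Cloud project ID
  vertexLocation: 'us-central1', // defaults to us-central1 when omitted
}
```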
#### Output

View File

@@ -111,26 +111,24 @@ Diferentes tipos de bloques producen diferentes estructuras de salida. Esto es l
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### Campos de salida del bloque de condición
- **content**: El contenido original que se transmite
- **conditionResult**: Resultado booleano de la evaluación de la condición
- **selectedPath**: Información sobre la ruta seleccionada
- **conditionResult**: resultado booleano de la evaluación de la condición
- **selectedPath**: información sobre la ruta seleccionada
- **blockId**: ID del siguiente bloque en la ruta seleccionada
- **blockType**: Tipo del siguiente bloque
- **blockTitle**: Título del siguiente bloque
- **selectedConditionId**: ID de la condición seleccionada
- **blockType**: tipo del siguiente bloque
- **blockTitle**: título del siguiente bloque
- **selectedOption**: ID de la condición seleccionada
</Tab>
<Tab>

View File

@@ -0,0 +1,124 @@
---
title: ServiceNow
description: Crear, leer, actualizar y eliminar registros de ServiceNow
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/) es una potente plataforma en la nube diseñada para optimizar y automatizar la gestión de servicios de TI (ITSM), flujos de trabajo y procesos empresariales en toda tu organización. ServiceNow te permite gestionar incidencias, solicitudes, tareas, usuarios y más utilizando su amplia API.
Con ServiceNow, puedes:
- **Automatizar flujos de trabajo de TI**: crear, leer, actualizar y eliminar registros en cualquier tabla de ServiceNow, como incidencias, tareas, solicitudes de cambio y usuarios.
- **Integrar sistemas**: conectar ServiceNow con tus otras herramientas y procesos para una automatización fluida.
- **Mantener una única fuente de verdad**: mantener todos tus datos de servicio y operaciones organizados y accesibles.
- **Impulsar la eficiencia operativa**: reducir el trabajo manual y mejorar la calidad del servicio con flujos de trabajo personalizables y automatización.
En Sim, la integración de ServiceNow permite que tus agentes interactúen directamente con tu instancia de ServiceNow como parte de sus flujos de trabajo. Los agentes pueden crear, leer, actualizar o eliminar registros en cualquier tabla de ServiceNow y aprovechar datos de tickets o usuarios para automatización y toma de decisiones sofisticadas. Esta integración conecta tu automatización de flujos de trabajo y operaciones de TI, permitiendo que tus agentes gestionen solicitudes de servicio, incidencias, usuarios y activos sin intervención manual. Al conectar Sim con ServiceNow, puedes automatizar tareas de gestión de servicios, mejorar los tiempos de respuesta y garantizar un acceso consistente y seguro a los datos de servicio vitales de tu organización.
{/* MANUAL-CONTENT-END */}
## Instrucciones de uso
Integra ServiceNow en tu flujo de trabajo. Crea, lee, actualiza y elimina registros en cualquier tabla de ServiceNow, incluyendo incidencias, tareas, solicitudes de cambio, usuarios y más.
## Herramientas
### `servicenow_create_record`
Crear un nuevo registro en una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(p. ej., https://instance.service-now.com\) |
| `username` | string | Sí | Nombre de usuario de ServiceNow |
| `password` | string | Sí | Contraseña de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla \(p. ej., incident, task, sys_user\) |
| `fields` | json | Sí | Campos a establecer en el registro \(objeto JSON\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `record` | json | Registro de ServiceNow creado con sys_id y otros campos |
| `metadata` | json | Metadatos de la operación |
### `servicenow_read_record`
Leer registros de una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(p. ej., https://instance.service-now.com\) |
| `username` | string | Sí | Nombre de usuario de ServiceNow |
| `password` | string | Sí | Contraseña de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | No | sys_id del registro específico |
| `number` | string | No | Número de registro \(p. ej., INC0010001\) |
| `query` | string | No | Cadena de consulta codificada \(p. ej., "active=true^priority=1"\) |
| `limit` | number | No | Número máximo de registros a devolver |
| `fields` | string | No | Lista de campos separados por comas a devolver |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `records` | array | Array de registros de ServiceNow |
| `metadata` | json | Metadatos de la operación |
### `servicenow_update_record`
Actualiza un registro existente en una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(ej., https://instance.service-now.com\) |
| `username` | string | Sí | Nombre de usuario de ServiceNow |
| `password` | string | Sí | Contraseña de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | Sí | sys_id del registro a actualizar |
| `fields` | json | Sí | Campos a actualizar \(objeto JSON\) |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `record` | json | Registro de ServiceNow actualizado |
| `metadata` | json | Metadatos de la operación |
### `servicenow_delete_record`
Elimina un registro de una tabla de ServiceNow
#### Entrada
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Sí | URL de la instancia de ServiceNow \(ej., https://instance.service-now.com\) |
| `username` | string | Sí | Nombre de usuario de ServiceNow |
| `password` | string | Sí | Contraseña de ServiceNow |
| `tableName` | string | Sí | Nombre de la tabla |
| `sysId` | string | Sí | sys_id del registro a eliminar |
#### Salida
| Parámetro | Tipo | Descripción |
| --------- | ---- | ----------- |
| `success` | boolean | Si la eliminación fue exitosa |
| `metadata` | json | Metadatos de la operación |
## Notas
- Categoría: `tools`
- Tipo: `servicenow`

View File

@@ -37,16 +37,18 @@ Envía una solicitud de completado de chat a cualquier proveedor de LLM compatib
#### Entrada
| Parámetro | Tipo | Obligatorio | Descripción |
| Parámetro | Tipo | Requerido | Descripción |
| --------- | ---- | -------- | ----------- |
| `model` | string | Sí | El modelo a utilizar \(p. ej., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `model` | string | Sí | El modelo a utilizar \(ej., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | No | Prompt del sistema para establecer el comportamiento del asistente |
| `context` | string | Sí | El mensaje del usuario o contexto para enviar al modelo |
| `apiKey` | string | No | Clave API para el proveedor \(usa la clave de la plataforma si no se proporciona para modelos alojados\) |
| `context` | string | Sí | El mensaje del usuario o contexto a enviar al modelo |
| `apiKey` | string | No | Clave API del proveedor \(usa la clave de la plataforma si no se proporciona para modelos alojados\) |
| `temperature` | number | No | Temperatura para la generación de respuestas \(0-2\) |
| `maxTokens` | number | No | Tokens máximos en la respuesta |
| `azureEndpoint` | string | No | URL del endpoint de Azure OpenAI |
| `azureApiVersion` | string | No | Versión de la API de Azure OpenAI |
| `vertexProject` | string | No | ID del proyecto de Google Cloud para Vertex AI |
| `vertexLocation` | string | No | Ubicación de Google Cloud para Vertex AI \(por defecto us-central1\) |
#### Salida

View File

@@ -111,26 +111,24 @@ Différents types de blocs produisent différentes structures de sortie. Voici c
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### Champs de sortie du bloc de condition
- **content** : le contenu original transmis
- **conditionResult** : résultat booléen de l'évaluation de la condition
- **selectedPath** : informations sur le chemin sélectionné
- **blockId** : ID du bloc suivant dans le chemin sélectionné
- **blockType** : type du bloc suivant
- **blockTitle** : titre du bloc suivant
- **selectedConditionId** : ID de la condition sélectionnée
- **selectedOption** : ID de la condition sélectionnée
</Tab>
<Tab>

View File

@@ -0,0 +1,124 @@
---
title: ServiceNow
description: Créer, lire, mettre à jour et supprimer des enregistrements ServiceNow
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/) est une plateforme cloud puissante conçue pour rationaliser et automatiser la gestion des services informatiques (ITSM), les workflows et les processus métier au sein de votre organisation. ServiceNow vous permet de gérer les incidents, les demandes, les tâches, les utilisateurs et bien plus encore grâce à son API étendue.
Avec ServiceNow, vous pouvez :
- **Automatiser les workflows informatiques** : créer, lire, mettre à jour et supprimer des enregistrements dans n'importe quelle table ServiceNow, tels que les incidents, les tâches, les demandes de changement et les utilisateurs.
- **Intégrer les systèmes** : connecter ServiceNow avec vos autres outils et processus pour une automatisation transparente.
- **Maintenir une source unique de vérité** : garder toutes vos données de service et d'exploitation organisées et accessibles.
- **Améliorer l'efficacité opérationnelle** : réduire le travail manuel et améliorer la qualité du service grâce à des workflows personnalisables et à l'automatisation.
Dans Sim, l'intégration ServiceNow permet à vos agents d'interagir directement avec votre instance ServiceNow dans le cadre de leurs workflows. Les agents peuvent créer, lire, mettre à jour ou supprimer des enregistrements dans n'importe quelle table ServiceNow et exploiter les données de tickets ou d'utilisateurs pour une automatisation et une prise de décision sophistiquées. Cette intégration relie votre automatisation de workflow et vos opérations informatiques, permettant à vos agents de gérer les demandes de service, les incidents, les utilisateurs et les actifs sans intervention manuelle. En connectant Sim avec ServiceNow, vous pouvez automatiser les tâches de gestion des services, améliorer les temps de réponse et garantir un accès cohérent et sécurisé aux données de service vitales de votre organisation.
{/* MANUAL-CONTENT-END */}
## Instructions d'utilisation
Intégrez ServiceNow dans votre workflow. Créez, lisez, mettez à jour et supprimez des enregistrements dans n'importe quelle table ServiceNow, y compris les incidents, les tâches, les demandes de changement, les utilisateurs et bien plus encore.
## Outils
### `servicenow_create_record`
Créer un nouvel enregistrement dans une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Oui | URL de l'instance ServiceNow (par ex., https://instance.service-now.com) |
| `username` | string | Oui | Nom d'utilisateur ServiceNow |
| `password` | string | Oui | Mot de passe ServiceNow |
| `tableName` | string | Oui | Nom de la table (par ex., incident, task, sys_user) |
| `fields` | json | Oui | Champs à définir sur l'enregistrement (objet JSON) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Enregistrement ServiceNow créé avec sys_id et autres champs |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_read_record`
Lire des enregistrements d'une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Oui | URL de l'instance ServiceNow (par ex., https://instance.service-now.com) |
| `username` | string | Oui | Nom d'utilisateur ServiceNow |
| `password` | string | Oui | Mot de passe ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Non | sys_id d'enregistrement spécifique |
| `number` | string | Non | Numéro d'enregistrement (par ex., INC0010001) |
| `query` | string | Non | Chaîne de requête encodée (par ex., "active=true^priority=1") |
| `limit` | number | Non | Nombre maximum d'enregistrements à retourner |
| `fields` | string | Non | Liste de champs à retourner, séparés par des virgules |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `records` | array | Tableau d'enregistrements ServiceNow |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_update_record`
Mettre à jour un enregistrement existant dans une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Oui | URL de l'instance ServiceNow \(par exemple, https://instance.service-now.com\) |
| `username` | string | Oui | Nom d'utilisateur ServiceNow |
| `password` | string | Oui | Mot de passe ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Oui | sys_id de l'enregistrement à mettre à jour |
| `fields` | json | Oui | Champs à mettre à jour \(objet JSON\) |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Enregistrement ServiceNow mis à jour |
| `metadata` | json | Métadonnées de l'opération |
### `servicenow_delete_record`
Supprimer un enregistrement d'une table ServiceNow
#### Entrée
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Oui | URL de l'instance ServiceNow \(par exemple, https://instance.service-now.com\) |
| `username` | string | Oui | Nom d'utilisateur ServiceNow |
| `password` | string | Oui | Mot de passe ServiceNow |
| `tableName` | string | Oui | Nom de la table |
| `sysId` | string | Oui | sys_id de l'enregistrement à supprimer |
#### Sortie
| Paramètre | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Indique si la suppression a réussi |
| `metadata` | json | Métadonnées de l'opération |
## Remarques
- Catégorie : `tools`
- Type : `servicenow`

View File

@@ -37,16 +37,18 @@ Envoyez une requête de complétion de chat à n'importe quel fournisseur de LLM
#### Entrée
| Paramètre | Type | Obligatoire | Description |
| --------- | ---- | ---------- | ----------- |
| `model` | chaîne | Oui | Le modèle à utiliser (ex. : gpt-4o, claude-sonnet-4-5, gemini-2.0-flash) |
| `systemPrompt` | chaîne | Non | Instruction système pour définir le comportement de l'assistant |
| `context` | chaîne | Oui | Le message utilisateur ou le contexte à envoyer au modèle |
| `apiKey` | chaîne | Non | Clé API pour le fournisseur (utilise la clé de plateforme si non fournie pour les modèles hébergés) |
| `temperature` | nombre | Non | Température pour la génération de réponse (0-2) |
| `maxTokens` | nombre | Non | Nombre maximum de tokens dans la réponse |
| `azureEndpoint` | chaîne | Non | URL du point de terminaison Azure OpenAI |
| `azureApiVersion` | chaîne | Non | Version de l'API Azure OpenAI |
| Paramètre | Type | Requis | Description |
| --------- | ---- | -------- | ----------- |
| `model` | string | Oui | Le modèle à utiliser \(par exemple, gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | Non | Prompt système pour définir le comportement de l'assistant |
| `context` | string | Oui | Le message utilisateur ou le contexte à envoyer au modèle |
| `apiKey` | string | Non | Clé API pour le fournisseur \(utilise la clé de la plateforme si non fournie pour les modèles hébergés\) |
| `temperature` | number | Non | Température pour la génération de réponse \(0-2\) |
| `maxTokens` | number | Non | Nombre maximum de tokens dans la réponse |
| `azureEndpoint` | string | Non | URL du point de terminaison Azure OpenAI |
| `azureApiVersion` | string | Non | Version de l'API Azure OpenAI |
| `vertexProject` | string | Non | ID du projet Google Cloud pour Vertex AI |
| `vertexLocation` | string | Non | Emplacement Google Cloud pour Vertex AI \(par défaut us-central1\) |
#### Sortie

View File

@@ -110,26 +110,24 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### 条件ブロックの出力フィールド
- **content**: そのまま渡される元のコンテンツ
- **conditionResult**: 条件評価の真偽値結果
- **selectedPath**: 選択されたパスに関する情報
- **blockId**: 選択されたパスの次のブロックのID
- **blockType**: 次のブロックのタイプ
- **blockTitle**: 次のブロックのタイトル
- **selectedConditionId**: 選択された条件のID
- **selectedOption**: 選択された条件のID
</Tab>
<Tab>

View File

@@ -0,0 +1,124 @@
---
title: ServiceNow
description: ServiceNowレコードの作成、読み取り、更新、削除
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/)は、組織全体のITサービス管理ITSM、ワークフロー、ビジネスプロセスを効率化し自動化するために設計された強力なクラウドプラットフォームです。ServiceNowを使用すると、広範なAPIを使用してインシデント、リクエスト、タスク、ユーザーなどを管理できます。
ServiceNowでは、次のことができます。
- **ITワークフローの自動化**: インシデント、タスク、変更リクエスト、ユーザーなど、任意のServiceNowテーブルのレコードを作成、読み取り、更新、削除します。
- **システムの統合**: ServiceNowを他のツールやプロセスと接続して、シームレスな自動化を実現します。
- **単一の信頼できる情報源の維持**: すべてのサービスおよび運用データを整理してアクセス可能な状態に保ちます。
- **運用効率の向上**: カスタマイズ可能なワークフローと自動化により、手作業を削減し、サービス品質を向上させます。
Simでは、ServiceNow統合により、エージェントがワークフローの一部としてServiceNowインスタンスと直接やり取りできるようになります。エージェントは、任意のServiceNowテーブルのレコードを作成、読み取り、更新、削除でき、チケットやユーザーデータを活用して高度な自動化と意思決定を行うことができます。この統合により、ワークフロー自動化とIT運用が橋渡しされ、エージェントは手動介入なしでサービスリクエスト、インシデント、ユーザー、資産を管理できるようになります。SimとServiceNowを接続することで、サービス管理タスクを自動化し、応答時間を改善し、組織の重要なサービスデータへの一貫性のある安全なアクセスを確保できます。
{/* MANUAL-CONTENT-END */}
## 使用方法
ServiceNowをワークフローに統合します。インシデント、タスク、変更リクエスト、ユーザーなど、任意のServiceNowテーブルのレコードを作成、読み取り、更新、削除します。
## ツール
### `servicenow_create_record`
ServiceNowテーブルに新しいレコードを作成
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | はい | ServiceNowインスタンスURL(例: https://instance.service-now.com) |
| `username` | string | はい | ServiceNowユーザー名 |
| `password` | string | はい | ServiceNowパスワード |
| `tableName` | string | はい | テーブル名(例: incident、task、sys_user) |
| `fields` | json | はい | レコードに設定するフィールド(JSONオブジェクト) |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `record` | json | sys_idおよびその他のフィールドを含む作成されたServiceNowレコード |
| `metadata` | json | 操作メタデータ |
### `servicenow_read_record`
ServiceNowテーブルからレコードを読み取ります
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | はい | ServiceNowインスタンスURL(例: https://instance.service-now.com) |
| `username` | string | はい | ServiceNowユーザー名 |
| `password` | string | はい | ServiceNowパスワード |
| `tableName` | string | はい | テーブル名 |
| `sysId` | string | いいえ | 特定のレコードのsys_id |
| `number` | string | いいえ | レコード番号(例: INC0010001) |
| `query` | string | いいえ | エンコードされたクエリ文字列(例: "active=true^priority=1") |
| `limit` | number | いいえ | 返すレコードの最大数 |
| `fields` | string | いいえ | 返すフィールドのカンマ区切りリスト |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `records` | array | ServiceNowレコードの配列 |
| `metadata` | json | 操作メタデータ |
### `servicenow_update_record`
ServiceNowテーブル内の既存のレコードを更新
#### 入力
| パラメータ | 型 | 必須 | 説明 |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | はい | ServiceNowインスタンスURL例: https://instance.service-now.com |
| `username` | string | はい | ServiceNowユーザー名 |
| `password` | string | はい | ServiceNowパスワード |
| `tableName` | string | はい | テーブル名 |
| `sysId` | string | はい | 更新するレコードのsys_id |
| `fields` | json | はい | 更新するフィールドJSONオブジェクト |
#### 出力
| パラメータ | 型 | 説明 |
| --------- | ---- | ----------- |
| `record` | json | 更新されたServiceNowレコード |
| `metadata` | json | 操作メタデータ |
### `servicenow_delete_record`
Delete a record from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL (e.g., https://instance.service-now.com) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | sys_id of the record to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the deletion succeeded |
| `metadata` | json | Operation metadata |
## Notes
- Category: `tools`
- Type: `servicenow`

View File

@@ -42,11 +42,13 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
| `model` | string | Yes | Model to use (e.g., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash) |
| `systemPrompt` | string | No | System prompt that sets the assistant's behavior |
| `context` | string | Yes | User message or context to send to the model |
| `apiKey` | string | No | Provider API key (for hosted models, the platform key is used unless one is provided) |
| `apiKey` | string | No | Provider API key (for hosted models, the platform key is used if none is provided) |
| `temperature` | number | No | Temperature for response generation (0-2) |
| `maxTokens` | number | No | Maximum number of tokens in the response |
| `azureEndpoint` | string | No | Azure OpenAI endpoint URL |
| `azureApiVersion` | string | No | Azure OpenAI API version |
| `vertexProject` | string | No | Google Cloud project ID for Vertex AI |
| `vertexLocation` | string | No | Google Cloud location for Vertex AI (defaults to us-central1) |
#### Output

View File

@@ -110,26 +110,24 @@ import { Tab, Tabs } from 'fumadocs-ui/components/tabs'
```json
{
"content": "Original content passed through",
"conditionResult": true,
"selectedPath": {
"blockId": "2acd9007-27e8-4510-a487-73d3b825e7c1",
"blockType": "agent",
"blockTitle": "Follow-up Agent"
},
"selectedConditionId": "condition-1"
"selectedOption": "condition-1"
}
```
### Condition Block Output Fields
- **content**: The original content passed through
- **conditionResult**: Boolean result of the condition evaluation
- **selectedPath**: Information about the chosen path
- **blockId**: ID of the next block in the chosen path
- **blockType**: Type of the next block
- **blockTitle**: Title of the next block
- **selectedConditionId**: ID of the selected condition
- **conditionResult**: Boolean result of the condition evaluation
- **selectedPath**: Information about the selected path
- **blockId**: ID of the next block in the selected path
- **blockType**: Type of the next block
- **blockTitle**: Title of the next block
- **selectedOption**: ID of the selected condition
</Tab>
<Tab>

View File

@@ -0,0 +1,124 @@
---
title: ServiceNow
description: Create, read, update, and delete ServiceNow records
---
import { BlockInfoCard } from "@/components/ui/block-info-card"
<BlockInfoCard
type="servicenow"
color="#032D42"
/>
{/* MANUAL-CONTENT-START:intro */}
[ServiceNow](https://www.servicenow.com/) is a powerful cloud platform designed to streamline and automate IT Service Management (ITSM), workflows, and business processes across your organization. ServiceNow lets you manage incidents, requests, tasks, users, and much more through its powerful APIs.
With ServiceNow, you can:
- **Automate IT workflows**: Create, read, update, and delete records in any ServiceNow table, such as incidents, tasks, change requests, and users.
- **Integrate your systems**: Connect ServiceNow with your other tools and processes for seamless automation.
- **Maintain a single source of truth**: Keep all service and operational data organized and easy to access.
- **Improve operational efficiency**: Reduce manual work and raise service quality with customizable workflows and automation.
In Sim, the ServiceNow integration lets your agents interact with a ServiceNow instance directly within a workflow. Agents can create, read, update, or delete records in any ServiceNow table and use ticket or user data for sophisticated automation and decision-making. This integration seamlessly bridges workflow automation and IT operations, letting agents manage service requests, incidents, users, and assets without manual intervention. By connecting Sim with ServiceNow, you can automate service management tasks, improve response times, and ensure consistent, secure access to your organization's critical service data.
{/* MANUAL-CONTENT-END */}
## Usage
Integrate ServiceNow into your workflow. Create, read, update, and delete records in any ServiceNow table, including incidents, tasks, change requests, users, and more.
## Tools
### `servicenow_create_record`
Create a new record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL (e.g., https://instance.service-now.com) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name (e.g., incident, task, sys_user) |
| `fields` | json | Yes | Fields to set on the record (JSON object) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Created ServiceNow record, including sys_id and other fields |
| `metadata` | json | Operation metadata |
### `servicenow_read_record`
Read records from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL (e.g., https://instance.service-now.com) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | No | sys_id of a specific record |
| `number` | string | No | Record number (e.g., INC0010001) |
| `query` | string | No | Encoded query string (e.g., "active=true^priority=1") |
| `limit` | number | No | Maximum number of records to return |
| `fields` | string | No | Comma-separated list of fields to return |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `records` | array | Array of ServiceNow records |
| `metadata` | json | Operation metadata |
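A minimal sketch of `servicenow_read_record` inputs, assuming you want up to ten open priority-1 incidents; the encoded query follows ServiceNow's query syntax, and the column names are illustrative:
```ts
// Hypothetical servicenow_read_record inputs: fetch up to 10 active priority-1
// incidents and return only a few columns. Field names are examples.
const readIncidentsInput = {
  instanceUrl: 'https://instance.service-now.com',
  username: 'integration.user',
  password: '********',
  tableName: 'incident',
  query: 'active=true^priority=1',
  limit: 10,
  fields: 'number,short_description,priority',
}
```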
### `servicenow_update_record`
Update an existing record in a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL (e.g., https://instance.service-now.com) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | sys_id of the record to update |
| `fields` | json | Yes | Fields to update (JSON object) |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `record` | json | Updated ServiceNow record |
| `metadata` | json | Operation metadata |
### `servicenow_delete_record`
Delete a record from a ServiceNow table
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `instanceUrl` | string | Yes | ServiceNow instance URL (e.g., https://instance.service-now.com) |
| `username` | string | Yes | ServiceNow username |
| `password` | string | Yes | ServiceNow password |
| `tableName` | string | Yes | Table name |
| `sysId` | string | Yes | sys_id of the record to delete |
#### Output
| Parameter | Type | Description |
| --------- | ---- | ----------- |
| `success` | boolean | Whether the deletion succeeded |
| `metadata` | json | Operation metadata |
## Notes
- Category: `tools`
- Type: `servicenow`

View File

@@ -37,16 +37,18 @@ import { BlockInfoCard } from "@/components/ui/block-info-card"
#### Input
| Parameter | Type | Required | Description |
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `model` | string | Yes | Model to use \(e.g., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash\) |
| `systemPrompt` | string | No | System prompt that sets the assistant's behavior |
| `context` | string | Yes | User message or context to send to the model |
| `apiKey` | string | No | Provider API key \(if not provided for a hosted model, the platform key is used\) |
| `temperature` | number | No | Temperature for response generation \(0-2\) |
| `maxTokens` | number | No | Maximum number of tokens in the response |
| `azureEndpoint` | string | No | Azure OpenAI endpoint URL |
| `model` | string | Yes | Model to use (e.g., gpt-4o, claude-sonnet-4-5, gemini-2.0-flash) |
| `systemPrompt` | string | No | System prompt that sets the assistant's behavior |
| `context` | string | Yes | User message or context to send to the model |
| `apiKey` | string | No | Provider API key (if not provided, the platform key is used for hosted models) |
| `temperature` | number | No | Temperature for response generation (0-2) |
| `maxTokens` | number | No | Maximum number of tokens in the response |
| `azureEndpoint` | string | No | Azure OpenAI endpoint URL |
| `azureApiVersion` | string | No | Azure OpenAI API version |
| `vertexProject` | string | No | Google Cloud project ID for Vertex AI |
| `vertexLocation` | string | No | Google Cloud location for Vertex AI (defaults to us-central1) |
#### Output
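A hypothetical set of Agent block inputs exercising the new Vertex AI parameters above; the project ID is a placeholder, not a value from this change:
```ts
// Hypothetical Agent block inputs using the Vertex AI parameters documented
// above. The project ID is a placeholder; vertexLocation falls back to
// us-central1 when omitted.
const agentInputs = {
  model: 'gemini-2.0-flash',
  context: 'Summarize the latest incident report.',
  vertexProject: 'my-gcp-project',
  vertexLocation: 'us-central1',
}
```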

View File

@@ -557,7 +557,7 @@ checksums:
content/8: 6325adefb6e1520835225285b18b6a45
content/9: b7fa85fce9c7476fe132df189e27dac1
content/10: 371d0e46b4bd2c23f559b8bc112f6955
content/11: 985f435f721b00df4d13fa0a5552684c
content/11: 7ad14ccfe548588081626cfe769ad492
content/12: bcadfc362b69078beee0088e5936c98b
content/13: 6af66efd0da20944a87fdb8d9defa358
content/14: b3f310d5ef115bea5a8b75bf25d7ea9a
@@ -4811,9 +4811,9 @@ checksums:
content/19: 85547efea8ae0e8170ac4e2030f6be25
content/20: 25c56dcdc4af1516c3fbf9d82d96b48d
content/21: 56dbe63da14a319cd520ab1615c94be7
content/22: e092cde0c92ef09c642a62636e7e3ae3
content/22: e039f6c905c8aa148cc3e7af19f05239
content/23: c7004f5db8f7134d7e3a36a1916691a2
content/24: bbc26961050b132b9bc4f14ba11f407a
content/24: 26555018b90fc8fb3ac65cece15f3966
content/25: 56dbe63da14a319cd520ab1615c94be7
content/26: 3e835ecc38acf2c76179034360d41670
content/27: a13bbc3dac7388e1ef4e9cbafdcc8241
@@ -49822,3 +49822,41 @@ checksums:
content/472: dbc5fceeefb3ab5fa505394becafef4e
content/473: b3f310d5ef115bea5a8b75bf25d7ea9a
content/474: 27c398e669b297cea076e4ce4cc0c5eb
9a28da736b42bf8de55126d4c06b6150:
meta/title: 418d5c8a18ad73520b38765741601f32
meta/description: 41cb31abf94297849fb8a4023cf0211d
content/0: 1b031fb0c62c46b177aeed5c3d3f8f80
content/1: e72670f88454b5b1c955b029de5fa8b5
content/2: d586e5af506d99add847369c0accfb4d
content/3: a2ce9ed4954ab55bcebed927cec8e890
content/4: 5fc7b723a6adcf201e8deb3f5ed9a9e3
content/5: a78981875c359a3343f26ed4d115f899
content/6: 821e6394b0a953e2b0842b04ae8f3105
content/7: 56a538eaccb1158fb1f7a01cc32f7331
content/8: 9c8aa3f09c9b2bd50ea4cdff3598ea4e
content/9: 263633aee6db9332de806ae50d87de05
content/10: 5a7e2171e5f73fec5eae21a50e5de661
content/11: 371d0e46b4bd2c23f559b8bc112f6955
content/12: 5905ef5d0db0354c08394acb0b5cda4b
content/13: bcadfc362b69078beee0088e5936c98b
content/14: d81ef802f80143282cf4e534561a9570
content/15: 02233e6212003c1d121424cfd8b86b62
content/16: efe2c6dd368708de68a1addbfdb11b0c
content/17: 371d0e46b4bd2c23f559b8bc112f6955
content/18: 2722e8bee100e7bc4590fa02710e9508
content/19: bcadfc362b69078beee0088e5936c98b
content/20: 953f353184dc27db1f20156db2a9ad90
content/21: 2011e87d0555cd0ab133ef2d35e7a37b
content/22: dbf08acb413d845ec419e45b1f986bdb
content/23: 371d0e46b4bd2c23f559b8bc112f6955
content/24: afc35de2990ed0e9bb8f98dc1b9609ce
content/25: bcadfc362b69078beee0088e5936c98b
content/26: c06a5bb458242baa23d34957034c2fe7
content/27: ff043e912417bc29ac7c64520160c07d
content/28: 9c2175ab469cb6ff9e62bc8bdcf7621d
content/29: 371d0e46b4bd2c23f559b8bc112f6955
content/30: 20e6bddad8e7f34a3d09e5b0c5678c13
content/31: bcadfc362b69078beee0088e5936c98b
content/32: fd0f38eb3fe5cf95be366a4ff6b4fb90
content/33: b3f310d5ef115bea5a8b75bf25d7ea9a
content/34: 4a7b2c644e487f3d12b6a6b54f8c6773

View File

@@ -573,10 +573,10 @@ export default function LoginPage({
<Dialog open={forgotPasswordOpen} onOpenChange={setForgotPasswordOpen}>
<DialogContent className='auth-card auth-card-shadow max-w-[540px] rounded-[10px] border backdrop-blur-sm'>
<DialogHeader>
<DialogTitle className='auth-text-primary font-semibold text-xl tracking-tight'>
<DialogTitle className='font-semibold text-black text-xl tracking-tight'>
Reset Password
</DialogTitle>
<DialogDescription className='auth-text-secondary text-sm'>
<DialogDescription className='text-muted-foreground text-sm'>
Enter your email address and we'll send you a link to reset your password if your
account exists.
</DialogDescription>

View File

@@ -70,6 +70,7 @@ export const FOOTER_TOOLS = [
'Salesforce',
'SendGrid',
'Serper',
'ServiceNow',
'SharePoint',
'Slack',
'Smtp',

View File

@@ -2,7 +2,6 @@ import { Suspense } from 'react'
import dynamic from 'next/dynamic'
import { Background, Footer, Nav, StructuredData } from '@/app/(landing)/components'
// Lazy load heavy components for better initial load performance
const Hero = dynamic(() => import('@/app/(landing)/components/hero/hero'), {
loading: () => <div className='h-[600px] animate-pulse bg-gray-50' />,
})

View File

@@ -1,8 +1,7 @@
import Image from 'next/image'
import Link from 'next/link'
import { Avatar, AvatarFallback, AvatarImage } from '@/components/ui/avatar'
import { getAllPostMeta } from '@/lib/blog/registry'
import { soehne } from '@/app/_styles/fonts/soehne/soehne'
import { PostGrid } from '@/app/(landing)/studio/post-grid'
export const revalidate = 3600
@@ -18,7 +17,6 @@ export default async function StudioIndex({
const all = await getAllPostMeta()
const filtered = tag ? all.filter((p) => p.tags.includes(tag)) : all
// Sort to ensure featured post is first on page 1
const sorted =
pageNum === 1
? filtered.sort((a, b) => {
@@ -63,69 +61,7 @@ export default async function StudioIndex({
</div> */}
{/* Grid layout for consistent rows */}
<div className='grid grid-cols-1 gap-4 md:grid-cols-2 md:gap-6 lg:grid-cols-3'>
{posts.map((p, i) => {
return (
<Link key={p.slug} href={`/studio/${p.slug}`} className='group flex flex-col'>
<div className='flex h-full flex-col overflow-hidden rounded-xl border border-gray-200 transition-colors duration-300 hover:border-gray-300'>
<Image
src={p.ogImage}
alt={p.title}
width={800}
height={450}
className='h-48 w-full object-cover'
sizes='(max-width: 768px) 100vw, (max-width: 1024px) 50vw, 33vw'
loading='lazy'
unoptimized
/>
<div className='flex flex-1 flex-col p-4'>
<div className='mb-2 text-gray-600 text-xs'>
{new Date(p.date).toLocaleDateString('en-US', {
month: 'short',
day: 'numeric',
year: 'numeric',
})}
</div>
<h3 className='shine-text mb-1 font-medium text-lg leading-tight'>{p.title}</h3>
<p className='mb-3 line-clamp-3 flex-1 text-gray-700 text-sm'>{p.description}</p>
<div className='flex items-center gap-2'>
<div className='-space-x-1.5 flex'>
{(p.authors && p.authors.length > 0 ? p.authors : [p.author])
.slice(0, 3)
.map((author, idx) => (
<Avatar key={idx} className='size-4 border border-white'>
<AvatarImage src={author?.avatarUrl} alt={author?.name} />
<AvatarFallback className='border border-white bg-gray-100 text-[10px] text-gray-600'>
{author?.name.slice(0, 2)}
</AvatarFallback>
</Avatar>
))}
</div>
<span className='text-gray-600 text-xs'>
{(p.authors && p.authors.length > 0 ? p.authors : [p.author])
.slice(0, 2)
.map((a) => a?.name)
.join(', ')}
{(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length > 2 && (
<>
{' '}
and{' '}
{(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length - 2}{' '}
other
{(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length - 2 >
1
? 's'
: ''}
</>
)}
</span>
</div>
</div>
</div>
</Link>
)
})}
</div>
<PostGrid posts={posts} />
{totalPages > 1 && (
<div className='mt-10 flex items-center justify-center gap-3'>

View File

@@ -0,0 +1,90 @@
'use client'
import Image from 'next/image'
import Link from 'next/link'
import { Avatar, AvatarFallback, AvatarImage } from '@/components/ui/avatar'
interface Author {
id: string
name: string
avatarUrl?: string
url?: string
}
interface Post {
slug: string
title: string
description: string
date: string
ogImage: string
author: Author
authors?: Author[]
featured?: boolean
}
export function PostGrid({ posts }: { posts: Post[] }) {
return (
<div className='grid grid-cols-1 gap-4 md:grid-cols-2 md:gap-6 lg:grid-cols-3'>
{posts.map((p, index) => (
<Link key={p.slug} href={`/studio/${p.slug}`} className='group flex flex-col'>
<div className='flex h-full flex-col overflow-hidden rounded-xl border border-gray-200 transition-colors duration-300 hover:border-gray-300'>
{/* Image container with fixed aspect ratio to prevent layout shift */}
<div className='relative aspect-video w-full overflow-hidden'>
<Image
src={p.ogImage}
alt={p.title}
sizes='(max-width: 768px) 100vw, (max-width: 1024px) 50vw, 33vw'
unoptimized
priority={index < 6}
loading={index < 6 ? undefined : 'lazy'}
fill
style={{ objectFit: 'cover' }}
/>
</div>
<div className='flex flex-1 flex-col p-4'>
<div className='mb-2 text-gray-600 text-xs'>
{new Date(p.date).toLocaleDateString('en-US', {
month: 'short',
day: 'numeric',
year: 'numeric',
})}
</div>
<h3 className='shine-text mb-1 font-medium text-lg leading-tight'>{p.title}</h3>
<p className='mb-3 line-clamp-3 flex-1 text-gray-700 text-sm'>{p.description}</p>
<div className='flex items-center gap-2'>
<div className='-space-x-1.5 flex'>
{(p.authors && p.authors.length > 0 ? p.authors : [p.author])
.slice(0, 3)
.map((author, idx) => (
<Avatar key={idx} className='size-4 border border-white'>
<AvatarImage src={author?.avatarUrl} alt={author?.name} />
<AvatarFallback className='border border-white bg-gray-100 text-[10px] text-gray-600'>
{author?.name.slice(0, 2)}
</AvatarFallback>
</Avatar>
))}
</div>
<span className='text-gray-600 text-xs'>
{(p.authors && p.authors.length > 0 ? p.authors : [p.author])
.slice(0, 2)
.map((a) => a?.name)
.join(', ')}
{(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length > 2 && (
<>
{' '}
and {(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length - 2}{' '}
other
{(p.authors && p.authors.length > 0 ? p.authors : [p.author]).length - 2 > 1
? 's'
: ''}
</>
)}
</span>
</div>
</div>
</div>
</Link>
))}
</div>
)
}

View File

@@ -12,6 +12,7 @@ export function ThemeProvider({ children, ...props }: ThemeProviderProps) {
pathname === '/' ||
pathname.startsWith('/login') ||
pathname.startsWith('/signup') ||
pathname.startsWith('/reset-password') ||
pathname.startsWith('/sso') ||
pathname.startsWith('/terms') ||
pathname.startsWith('/privacy') ||

View File

@@ -759,3 +759,24 @@ input[type="search"]::-ms-clear {
--surface-elevated: #202020;
}
}
/**
* Remove backticks from inline code in prose (Tailwind Typography default)
*/
.prose code::before,
.prose code::after {
content: none !important;
}
/**
* Remove underlines from heading anchor links in prose
*/
.prose h1 a,
.prose h2 a,
.prose h3 a,
.prose h4 a,
.prose h5 a,
.prose h6 a {
text-decoration: none !important;
color: inherit !important;
}

View File

@@ -32,7 +32,17 @@ export async function GET(request: NextRequest) {
.from(account)
.where(and(...whereConditions))
return NextResponse.json({ accounts })
// Use the user's email as the display name (consistent with credential selector)
const userEmail = session.user.email
const accountsWithDisplayName = accounts.map((acc) => ({
id: acc.id,
accountId: acc.accountId,
providerId: acc.providerId,
displayName: userEmail || acc.providerId,
}))
return NextResponse.json({ accounts: accountsWithDisplayName })
} catch (error) {
logger.error('Failed to fetch accounts', { error })
return NextResponse.json({ error: 'Internal server error' }, { status: 500 })

View File

@@ -6,6 +6,10 @@
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
import { createMockRequest, setupAuthApiMocks } from '@/app/api/__test-utils__/utils'
vi.mock('@/lib/core/utils/urls', () => ({
getBaseUrl: vi.fn(() => 'https://app.example.com'),
}))
describe('Forget Password API Route', () => {
beforeEach(() => {
vi.resetModules()
@@ -15,7 +19,7 @@ describe('Forget Password API Route', () => {
vi.clearAllMocks()
})
it('should send password reset email successfully', async () => {
it('should send password reset email successfully with same-origin redirectTo', async () => {
setupAuthApiMocks({
operations: {
forgetPassword: { success: true },
@@ -24,7 +28,7 @@ describe('Forget Password API Route', () => {
const req = createMockRequest('POST', {
email: 'test@example.com',
redirectTo: 'https://example.com/reset',
redirectTo: 'https://app.example.com/reset',
})
const { POST } = await import('@/app/api/auth/forget-password/route')
@@ -39,12 +43,36 @@ describe('Forget Password API Route', () => {
expect(auth.auth.api.forgetPassword).toHaveBeenCalledWith({
body: {
email: 'test@example.com',
redirectTo: 'https://example.com/reset',
redirectTo: 'https://app.example.com/reset',
},
method: 'POST',
})
})
it('should reject external redirectTo URL', async () => {
setupAuthApiMocks({
operations: {
forgetPassword: { success: true },
},
})
const req = createMockRequest('POST', {
email: 'test@example.com',
redirectTo: 'https://evil.com/phishing',
})
const { POST } = await import('@/app/api/auth/forget-password/route')
const response = await POST(req)
const data = await response.json()
expect(response.status).toBe(400)
expect(data.message).toBe('Redirect URL must be a valid same-origin URL')
const auth = await import('@/lib/auth')
expect(auth.auth.api.forgetPassword).not.toHaveBeenCalled()
})
it('should send password reset email without redirectTo', async () => {
setupAuthApiMocks({
operations: {

View File

@@ -1,6 +1,7 @@
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { auth } from '@/lib/auth'
import { isSameOrigin } from '@/lib/core/utils/validation'
import { createLogger } from '@/lib/logs/console/logger'
export const dynamic = 'force-dynamic'
@@ -13,10 +14,15 @@ const forgetPasswordSchema = z.object({
.email('Please provide a valid email address'),
redirectTo: z
.string()
.url('Redirect URL must be a valid URL')
.optional()
.or(z.literal(''))
.transform((val) => (val === '' ? undefined : val)),
.transform((val) => (val === '' || val === undefined ? undefined : val))
.refine(
(val) => val === undefined || (z.string().url().safeParse(val).success && isSameOrigin(val)),
{
message: 'Redirect URL must be a valid same-origin URL',
}
),
})
export async function POST(request: NextRequest) {
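The `isSameOrigin` helper imported above is not shown in this diff; a minimal sketch of what such a check might look like, assuming it compares the candidate URL's origin against the app's base URL from `@/lib/core/utils/urls` (the real implementation may differ):
```ts
// Sketch only (assumption): reject URLs whose origin differs from the app's own.
import { getBaseUrl } from '@/lib/core/utils/urls'

export function isSameOrigin(url: string): boolean {
  try {
    // new URL() throws on malformed input, which we treat as "not same origin"
    return new URL(url).origin === new URL(getBaseUrl()).origin
  } catch {
    return false
  }
}
```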

View File

@@ -38,7 +38,6 @@ vi.mock('@/lib/logs/console/logger', () => ({
}))
import { db } from '@sim/db'
import { createLogger } from '@/lib/logs/console/logger'
import { refreshOAuthToken } from '@/lib/oauth/oauth'
import {
getCredential,
@@ -49,7 +48,6 @@ import {
const mockDb = db as any
const mockRefreshOAuthToken = refreshOAuthToken as any
const mockLogger = (createLogger as any)()
describe('OAuth Utils', () => {
beforeEach(() => {
@@ -87,7 +85,6 @@ describe('OAuth Utils', () => {
const userId = await getUserId('request-id')
expect(userId).toBeUndefined()
expect(mockLogger.warn).toHaveBeenCalled()
})
it('should return undefined if workflow is not found', async () => {
@@ -96,7 +93,6 @@ describe('OAuth Utils', () => {
const userId = await getUserId('request-id', 'nonexistent-workflow-id')
expect(userId).toBeUndefined()
expect(mockLogger.warn).toHaveBeenCalled()
})
})
@@ -121,7 +117,6 @@ describe('OAuth Utils', () => {
const credential = await getCredential('request-id', 'nonexistent-id', 'test-user-id')
expect(credential).toBeUndefined()
expect(mockLogger.warn).toHaveBeenCalled()
})
})
@@ -139,7 +134,6 @@ describe('OAuth Utils', () => {
expect(mockRefreshOAuthToken).not.toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'valid-token', refreshed: false })
expect(mockLogger.info).toHaveBeenCalledWith(expect.stringContaining('Access token is valid'))
})
it('should refresh token when expired', async () => {
@@ -163,9 +157,6 @@ describe('OAuth Utils', () => {
expect(mockDb.update).toHaveBeenCalled()
expect(mockDb.set).toHaveBeenCalled()
expect(result).toEqual({ accessToken: 'new-token', refreshed: true })
expect(mockLogger.info).toHaveBeenCalledWith(
expect.stringContaining('Successfully refreshed')
)
})
it('should handle refresh token error', async () => {
@@ -182,8 +173,6 @@ describe('OAuth Utils', () => {
await expect(
refreshTokenIfNeeded('request-id', mockCredential, 'credential-id')
).rejects.toThrow('Failed to refresh token')
expect(mockLogger.error).toHaveBeenCalled()
})
it('should not attempt refresh if no refresh token', async () => {
@@ -251,7 +240,6 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('nonexistent-id', 'test-user-id', 'request-id')
expect(token).toBeNull()
expect(mockLogger.warn).toHaveBeenCalled()
})
it('should return null if refresh fails', async () => {
@@ -270,7 +258,6 @@ describe('OAuth Utils', () => {
const token = await refreshAccessTokenIfNeeded('credential-id', 'test-user-id', 'request-id')
expect(token).toBeNull()
expect(mockLogger.error).toHaveBeenCalled()
})
})
})

View File

@@ -18,6 +18,7 @@ interface AccountInsertData {
updatedAt: Date
refreshToken?: string
idToken?: string
accessTokenExpiresAt?: Date
}
/**
@@ -103,6 +104,7 @@ export async function getOAuthToken(userId: string, providerId: string): Promise
accessToken: account.accessToken,
refreshToken: account.refreshToken,
accessTokenExpiresAt: account.accessTokenExpiresAt,
idToken: account.idToken,
})
.from(account)
.where(and(eq(account.userId, userId), eq(account.providerId, providerId)))

View File

@@ -303,6 +303,14 @@ export async function POST(req: NextRequest) {
apiVersion: 'preview',
endpoint: env.AZURE_OPENAI_ENDPOINT,
}
} else if (providerEnv === 'vertex') {
providerConfig = {
provider: 'vertex',
model: modelToUse,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
}
} else {
providerConfig = {
provider: providerEnv,

View File

@@ -66,6 +66,14 @@ export async function POST(req: NextRequest) {
apiVersion: env.AZURE_OPENAI_API_VERSION,
endpoint: env.AZURE_OPENAI_ENDPOINT,
}
} else if (providerEnv === 'vertex') {
providerConfig = {
provider: 'vertex',
model: modelToUse,
apiKey: env.COPILOT_API_KEY,
vertexProject: env.VERTEX_PROJECT,
vertexLocation: env.VERTEX_LOCATION,
}
} else {
providerConfig = {
provider: providerEnv,

View File

@@ -6,7 +6,22 @@ import {
workflowDeploymentVersion,
workflowExecutionLogs,
} from '@sim/db/schema'
import { and, desc, eq, gte, inArray, isNotNull, isNull, lte, or, type SQL, sql } from 'drizzle-orm'
import {
and,
desc,
eq,
gt,
gte,
inArray,
isNotNull,
isNull,
lt,
lte,
ne,
or,
type SQL,
sql,
} from 'drizzle-orm'
import { type NextRequest, NextResponse } from 'next/server'
import { z } from 'zod'
import { getSession } from '@/lib/auth'
@@ -22,14 +37,19 @@ const QueryParamsSchema = z.object({
limit: z.coerce.number().optional().default(100),
offset: z.coerce.number().optional().default(0),
level: z.string().optional(),
workflowIds: z.string().optional(), // Comma-separated list of workflow IDs
folderIds: z.string().optional(), // Comma-separated list of folder IDs
triggers: z.string().optional(), // Comma-separated list of trigger types
workflowIds: z.string().optional(),
folderIds: z.string().optional(),
triggers: z.string().optional(),
startDate: z.string().optional(),
endDate: z.string().optional(),
search: z.string().optional(),
workflowName: z.string().optional(),
folderName: z.string().optional(),
executionId: z.string().optional(),
costOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
costValue: z.coerce.number().optional(),
durationOperator: z.enum(['=', '>', '<', '>=', '<=', '!=']).optional(),
durationValue: z.coerce.number().optional(),
workspaceId: z.string(),
})
@@ -49,7 +69,6 @@ export async function GET(request: NextRequest) {
const { searchParams } = new URL(request.url)
const params = QueryParamsSchema.parse(Object.fromEntries(searchParams.entries()))
// Conditionally select columns based on detail level to optimize performance
const selectColumns =
params.details === 'full'
? {
@@ -63,9 +82,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: workflowExecutionLogs.executionData, // Large field - only in full mode
executionData: workflowExecutionLogs.executionData,
cost: workflowExecutionLogs.cost,
files: workflowExecutionLogs.files, // Large field - only in full mode
files: workflowExecutionLogs.files,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -82,7 +101,6 @@ export async function GET(request: NextRequest) {
deploymentVersionName: workflowDeploymentVersion.name,
}
: {
// Basic mode - exclude large fields for better performance
id: workflowExecutionLogs.id,
workflowId: workflowExecutionLogs.workflowId,
executionId: workflowExecutionLogs.executionId,
@@ -93,9 +111,9 @@ export async function GET(request: NextRequest) {
startedAt: workflowExecutionLogs.startedAt,
endedAt: workflowExecutionLogs.endedAt,
totalDurationMs: workflowExecutionLogs.totalDurationMs,
executionData: sql<null>`NULL`, // Exclude large execution data in basic mode
executionData: sql<null>`NULL`,
cost: workflowExecutionLogs.cost,
files: sql<null>`NULL`, // Exclude files in basic mode
files: sql<null>`NULL`,
createdAt: workflowExecutionLogs.createdAt,
workflowName: workflow.name,
workflowDescription: workflow.description,
@@ -109,7 +127,7 @@ export async function GET(request: NextRequest) {
pausedTotalPauseCount: pausedExecutions.totalPauseCount,
pausedResumedCount: pausedExecutions.resumedCount,
deploymentVersion: workflowDeploymentVersion.version,
deploymentVersionName: sql<null>`NULL`, // Only needed in full mode for details panel
deploymentVersionName: sql<null>`NULL`,
}
const baseQuery = db
@@ -139,34 +157,28 @@ export async function GET(request: NextRequest) {
)
)
// Build additional conditions for the query
let conditions: SQL | undefined
// Filter by level with support for derived statuses (running, pending)
if (params.level && params.level !== 'all') {
const levels = params.level.split(',').filter(Boolean)
const levelConditions: SQL[] = []
for (const level of levels) {
if (level === 'error') {
// Direct database field
levelConditions.push(eq(workflowExecutionLogs.level, 'error'))
} else if (level === 'info') {
// Completed info logs only (not running, not pending)
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNotNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'running') {
// Running logs: info level with no endedAt
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
isNull(workflowExecutionLogs.endedAt)
)
if (condition) levelConditions.push(condition)
} else if (level === 'pending') {
// Pending logs: info level with pause status indicators
const condition = and(
eq(workflowExecutionLogs.level, 'info'),
or(
@@ -189,7 +201,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by specific workflow IDs
if (params.workflowIds) {
const workflowIds = params.workflowIds.split(',').filter(Boolean)
if (workflowIds.length > 0) {
@@ -197,7 +208,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by folder IDs
if (params.folderIds) {
const folderIds = params.folderIds.split(',').filter(Boolean)
if (folderIds.length > 0) {
@@ -205,7 +215,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by triggers
if (params.triggers) {
const triggers = params.triggers.split(',').filter(Boolean)
if (triggers.length > 0 && !triggers.includes('all')) {
@@ -213,7 +222,6 @@ export async function GET(request: NextRequest) {
}
}
// Filter by date range
if (params.startDate) {
conditions = and(
conditions,
@@ -224,33 +232,79 @@ export async function GET(request: NextRequest) {
conditions = and(conditions, lte(workflowExecutionLogs.startedAt, new Date(params.endDate)))
}
// Filter by search query
if (params.search) {
const searchTerm = `%${params.search}%`
// With message removed, restrict search to executionId only
conditions = and(conditions, sql`${workflowExecutionLogs.executionId} ILIKE ${searchTerm}`)
}
// Filter by workflow name (from advanced search input)
if (params.workflowName) {
const nameTerm = `%${params.workflowName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${nameTerm}`)
}
// Filter by folder name (best-effort text match when present on workflows)
if (params.folderName) {
const folderTerm = `%${params.folderName}%`
conditions = and(conditions, sql`${workflow.name} ILIKE ${folderTerm}`)
}
// Execute the query using the optimized join
if (params.executionId) {
conditions = and(conditions, eq(workflowExecutionLogs.executionId, params.executionId))
}
if (params.costOperator && params.costValue !== undefined) {
const costField = sql`(${workflowExecutionLogs.cost}->>'total')::numeric`
switch (params.costOperator) {
case '=':
conditions = and(conditions, sql`${costField} = ${params.costValue}`)
break
case '>':
conditions = and(conditions, sql`${costField} > ${params.costValue}`)
break
case '<':
conditions = and(conditions, sql`${costField} < ${params.costValue}`)
break
case '>=':
conditions = and(conditions, sql`${costField} >= ${params.costValue}`)
break
case '<=':
conditions = and(conditions, sql`${costField} <= ${params.costValue}`)
break
case '!=':
conditions = and(conditions, sql`${costField} != ${params.costValue}`)
break
}
}
if (params.durationOperator && params.durationValue !== undefined) {
const durationField = workflowExecutionLogs.totalDurationMs
switch (params.durationOperator) {
case '=':
conditions = and(conditions, eq(durationField, params.durationValue))
break
case '>':
conditions = and(conditions, gt(durationField, params.durationValue))
break
case '<':
conditions = and(conditions, lt(durationField, params.durationValue))
break
case '>=':
conditions = and(conditions, gte(durationField, params.durationValue))
break
case '<=':
conditions = and(conditions, lte(durationField, params.durationValue))
break
case '!=':
conditions = and(conditions, ne(durationField, params.durationValue))
break
}
}
const logs = await baseQuery
.where(conditions)
.orderBy(desc(workflowExecutionLogs.startedAt))
.limit(params.limit)
.offset(params.offset)
// Get total count for pagination using the same join structure
const countQuery = db
.select({ count: sql<number>`count(*)` })
.from(workflowExecutionLogs)
@@ -279,13 +333,10 @@ export async function GET(request: NextRequest) {
const count = countResult[0]?.count || 0
// Block executions are now extracted from trace spans instead of separate table
const blockExecutionsByExecution: Record<string, any[]> = {}
// Create clean trace spans from block executions
const createTraceSpans = (blockExecutions: any[]) => {
return blockExecutions.map((block, index) => {
// For error blocks, include error information in the output
let output = block.outputData
if (block.status === 'error' && block.errorMessage) {
output = {
@@ -314,7 +365,6 @@ export async function GET(request: NextRequest) {
})
}
// Extract cost information from block executions
const extractCostSummary = (blockExecutions: any[]) => {
let totalCost = 0
let totalInputCost = 0
@@ -333,7 +383,6 @@ export async function GET(request: NextRequest) {
totalPromptTokens += block.cost.tokens?.prompt || 0
totalCompletionTokens += block.cost.tokens?.completion || 0
// Track per-model costs
if (block.cost.model) {
if (!models.has(block.cost.model)) {
models.set(block.cost.model, {
@@ -363,34 +412,29 @@ export async function GET(request: NextRequest) {
prompt: totalPromptTokens,
completion: totalCompletionTokens,
},
models: Object.fromEntries(models), // Convert Map to object for JSON serialization
models: Object.fromEntries(models),
}
}
// Transform to clean log format with workflow data included
const enhancedLogs = logs.map((log) => {
const blockExecutions = blockExecutionsByExecution[log.executionId] || []
// Only process trace spans and detailed cost in full mode
let traceSpans = []
let finalOutput: any
let costSummary = (log.cost as any) || { total: 0 }
if (params.details === 'full' && log.executionData) {
// Use stored trace spans if available, otherwise create from block executions
const storedTraceSpans = (log.executionData as any)?.traceSpans
traceSpans =
storedTraceSpans && Array.isArray(storedTraceSpans) && storedTraceSpans.length > 0
? storedTraceSpans
: createTraceSpans(blockExecutions)
// Prefer stored cost JSON; otherwise synthesize from blocks
costSummary =
log.cost && Object.keys(log.cost as any).length > 0
? (log.cost as any)
: extractCostSummary(blockExecutions)
// Include finalOutput if present on executionData
try {
const fo = (log.executionData as any)?.finalOutput
if (fo !== undefined) finalOutput = fo
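A hypothetical client request exercising the new cost and duration filters; the parameter names match `QueryParamsSchema` above, while the route path and workspace ID are placeholders:
```ts
// Hypothetical query: executions costing more than $0.05 that ran for at least
// 2000 ms. The route path and workspace ID are placeholders.
const params = new URLSearchParams({
  workspaceId: 'workspace-id',
  costOperator: '>',
  costValue: '0.05',
  durationOperator: '>=',
  durationValue: '2000',
})
const res = await fetch(`/api/logs?${params.toString()}`)
const body = await res.json()
```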

View File

@@ -5,6 +5,7 @@ import type { NextRequest } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { withMcpAuth } from '@/lib/mcp/middleware'
import { mcpService } from '@/lib/mcp/service'
import type { McpServerStatusConfig } from '@/lib/mcp/types'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
const logger = createLogger('McpServerRefreshAPI')
@@ -50,6 +51,12 @@ export const POST = withMcpAuth<{ id: string }>('read')(
let toolCount = 0
let lastError: string | null = null
const currentStatusConfig: McpServerStatusConfig =
(server.statusConfig as McpServerStatusConfig | null) ?? {
consecutiveFailures: 0,
lastSuccessfulDiscovery: null,
}
try {
const tools = await mcpService.discoverServerTools(userId, serverId, workspaceId)
connectionStatus = 'connected'
@@ -63,20 +70,40 @@ export const POST = withMcpAuth<{ id: string }>('read')(
logger.warn(`[${requestId}] Failed to connect to server ${serverId}:`, error)
}
const now = new Date()
const newStatusConfig =
connectionStatus === 'connected'
? { consecutiveFailures: 0, lastSuccessfulDiscovery: now.toISOString() }
: {
consecutiveFailures: currentStatusConfig.consecutiveFailures + 1,
lastSuccessfulDiscovery: currentStatusConfig.lastSuccessfulDiscovery,
}
const [refreshedServer] = await db
.update(mcpServers)
.set({
lastToolsRefresh: new Date(),
lastToolsRefresh: now,
connectionStatus,
lastError,
lastConnected: connectionStatus === 'connected' ? new Date() : server.lastConnected,
lastConnected: connectionStatus === 'connected' ? now : server.lastConnected,
toolCount,
updatedAt: new Date(),
statusConfig: newStatusConfig,
updatedAt: now,
})
.where(eq(mcpServers.id, serverId))
.returning()
logger.info(`[${requestId}] Successfully refreshed MCP server: ${serverId}`)
if (connectionStatus === 'connected') {
logger.info(
`[${requestId}] Successfully refreshed MCP server: ${serverId} (${toolCount} tools)`
)
await mcpService.clearCache(workspaceId)
} else {
logger.warn(
`[${requestId}] Refresh completed for MCP server ${serverId} but connection failed: ${lastError}`
)
}
return createMcpSuccessResponse({
status: connectionStatus,
toolCount,
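The `McpServerStatusConfig` shape used above is imported from `@/lib/mcp/types` and not shown in this diff; a sketch of what the route implies (the actual type may carry more fields):
```ts
// Inferred shape (assumption): tracks consecutive discovery failures and the
// timestamp of the last successful tool discovery as an ISO string.
interface McpServerStatusConfig {
  consecutiveFailures: number
  lastSuccessfulDiscovery: string | null
}
```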

View File

@@ -48,6 +48,19 @@ export const PATCH = withMcpAuth<{ id: string }>('write')(
// Remove workspaceId from body to prevent it from being updated
const { workspaceId: _, ...updateData } = body
// Get the current server to check if URL is changing
const [currentServer] = await db
.select({ url: mcpServers.url })
.from(mcpServers)
.where(
and(
eq(mcpServers.id, serverId),
eq(mcpServers.workspaceId, workspaceId),
isNull(mcpServers.deletedAt)
)
)
.limit(1)
const [updatedServer] = await db
.update(mcpServers)
.set({
@@ -71,8 +84,12 @@ export const PATCH = withMcpAuth<{ id: string }>('write')(
)
}
// Clear MCP service cache after update
mcpService.clearCache(workspaceId)
// Only clear cache if URL changed (requires re-discovery)
const urlChanged = body.url && currentServer?.url !== body.url
if (urlChanged) {
await mcpService.clearCache(workspaceId)
logger.info(`[${requestId}] Cleared cache due to URL change`)
}
logger.info(`[${requestId}] Successfully updated MCP server: ${serverId}`)
return createMcpSuccessResponse({ server: updatedServer })

View File

@@ -117,12 +117,14 @@ export const POST = withMcpAuth('write')(
timeout: body.timeout || 30000,
retries: body.retries || 3,
enabled: body.enabled !== false,
connectionStatus: 'connected',
lastConnected: new Date(),
updatedAt: new Date(),
deletedAt: null,
})
.where(eq(mcpServers.id, serverId))
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(
`[${requestId}] Successfully updated MCP server: ${body.name} (ID: ${serverId})`
@@ -145,12 +147,14 @@ export const POST = withMcpAuth('write')(
timeout: body.timeout || 30000,
retries: body.retries || 3,
enabled: body.enabled !== false,
connectionStatus: 'connected',
lastConnected: new Date(),
createdAt: new Date(),
updatedAt: new Date(),
})
.returning()
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(
`[${requestId}] Successfully registered MCP server: ${body.name} (ID: ${serverId})`
@@ -212,7 +216,7 @@ export const DELETE = withMcpAuth('admin')(
)
}
mcpService.clearCache(workspaceId)
await mcpService.clearCache(workspaceId)
logger.info(`[${requestId}] Successfully deleted MCP server: ${serverId}`)
return createMcpSuccessResponse({ message: `Server ${serverId} deleted successfully` })

View File

@@ -0,0 +1,103 @@
import { db } from '@sim/db'
import { workflow, workflowBlocks } from '@sim/db/schema'
import { eq } from 'drizzle-orm'
import type { NextRequest } from 'next/server'
import { createLogger } from '@/lib/logs/console/logger'
import { withMcpAuth } from '@/lib/mcp/middleware'
import { createMcpErrorResponse, createMcpSuccessResponse } from '@/lib/mcp/utils'
const logger = createLogger('McpStoredToolsAPI')
export const dynamic = 'force-dynamic'
interface StoredMcpTool {
workflowId: string
workflowName: string
serverId: string
serverUrl?: string
toolName: string
schema?: Record<string, unknown>
}
/**
* GET - Get all stored MCP tools from workflows in the workspace
*
* Scans all workflows in the workspace and extracts MCP tools that have been
* added to agent blocks. Returns the stored state of each tool for comparison
* against current server state.
*/
export const GET = withMcpAuth('read')(
async (request: NextRequest, { userId, workspaceId, requestId }) => {
try {
logger.info(`[${requestId}] Fetching stored MCP tools for workspace ${workspaceId}`)
// Get all workflows in workspace
const workflows = await db
.select({
id: workflow.id,
name: workflow.name,
})
.from(workflow)
.where(eq(workflow.workspaceId, workspaceId))
const workflowMap = new Map(workflows.map((w) => [w.id, w.name]))
const workflowIds = workflows.map((w) => w.id)
if (workflowIds.length === 0) {
return createMcpSuccessResponse({ tools: [] })
}
// Get all agent blocks from these workflows
const agentBlocks = await db
.select({
workflowId: workflowBlocks.workflowId,
subBlocks: workflowBlocks.subBlocks,
})
.from(workflowBlocks)
.where(eq(workflowBlocks.type, 'agent'))
const storedTools: StoredMcpTool[] = []
for (const block of agentBlocks) {
if (!workflowMap.has(block.workflowId)) continue
const subBlocks = block.subBlocks as Record<string, unknown> | null
if (!subBlocks) continue
const toolsSubBlock = subBlocks.tools as Record<string, unknown> | undefined
const toolsValue = toolsSubBlock?.value
if (!toolsValue || !Array.isArray(toolsValue)) continue
for (const tool of toolsValue) {
if (tool.type !== 'mcp') continue
const params = tool.params as Record<string, unknown> | undefined
if (!params?.serverId || !params?.toolName) continue
storedTools.push({
workflowId: block.workflowId,
workflowName: workflowMap.get(block.workflowId) || 'Untitled',
serverId: params.serverId as string,
serverUrl: params.serverUrl as string | undefined,
toolName: params.toolName as string,
schema: tool.schema as Record<string, unknown> | undefined,
})
}
}
logger.info(
`[${requestId}] Found ${storedTools.length} stored MCP tools across ${workflows.length} workflows`
)
return createMcpSuccessResponse({ tools: storedTools })
} catch (error) {
logger.error(`[${requestId}] Error fetching stored MCP tools:`, error)
return createMcpErrorResponse(
error instanceof Error ? error : new Error('Failed to fetch stored MCP tools'),
'Failed to fetch stored MCP tools',
500
)
}
}
)

View File

@@ -35,6 +35,8 @@ export async function POST(request: NextRequest) {
apiKey,
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
responseFormat,
workflowId,
workspaceId,
@@ -58,6 +60,8 @@ export async function POST(request: NextRequest) {
hasApiKey: !!apiKey,
hasAzureEndpoint: !!azureEndpoint,
hasAzureApiVersion: !!azureApiVersion,
hasVertexProject: !!vertexProject,
hasVertexLocation: !!vertexLocation,
hasResponseFormat: !!responseFormat,
workflowId,
stream: !!stream,
@@ -104,6 +108,8 @@ export async function POST(request: NextRequest) {
apiKey: finalApiKey,
azureEndpoint,
azureApiVersion,
vertexProject,
vertexLocation,
responseFormat,
workflowId,
workspaceId,

View File

@@ -11,6 +11,7 @@ import { processInputFileFields } from '@/lib/execution/files'
import { preprocessExecution } from '@/lib/execution/preprocessing'
import { createLogger } from '@/lib/logs/console/logger'
import { LoggingSession } from '@/lib/logs/execution/logging-session'
import { ALL_TRIGGER_TYPES } from '@/lib/logs/types'
import { executeWorkflowCore } from '@/lib/workflows/executor/execution-core'
import { type ExecutionEvent, encodeSSEEvent } from '@/lib/workflows/executor/execution-events'
import { PauseResumeManager } from '@/lib/workflows/executor/human-in-the-loop-manager'
@@ -30,7 +31,7 @@ const logger = createLogger('WorkflowExecuteAPI')
const ExecuteWorkflowSchema = z.object({
selectedOutputs: z.array(z.string()).optional().default([]),
triggerType: z.enum(['api', 'webhook', 'schedule', 'manual', 'chat']).optional(),
triggerType: z.enum(ALL_TRIGGER_TYPES).optional(),
stream: z.boolean().optional(),
useDraftState: z.boolean().optional(),
input: z.any().optional(),
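`ALL_TRIGGER_TYPES` is imported from `@/lib/logs/types`, but its definition is not part of this diff; a minimal sketch consistent with how it is passed to `z.enum` and with the five values previously inlined here (the real constant may include additional trigger types):
```ts
// Sketch only (assumption): single source of truth for trigger types.
// z.enum requires a readonly, non-empty tuple of string literals.
export const ALL_TRIGGER_TYPES = ['api', 'webhook', 'schedule', 'manual', 'chat'] as const
export type TriggerType = (typeof ALL_TRIGGER_TYPES)[number]
```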

View File

@@ -6,13 +6,14 @@ import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { encryptSecret } from '@/lib/core/security/encryption'
import { createLogger } from '@/lib/logs/console/logger'
import { ALL_TRIGGER_TYPES } from '@/lib/logs/types'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { MAX_EMAIL_RECIPIENTS, MAX_WORKFLOW_IDS } from '../constants'
const logger = createLogger('WorkspaceNotificationAPI')
const levelFilterSchema = z.array(z.enum(['info', 'error']))
const triggerFilterSchema = z.array(z.enum(['api', 'webhook', 'schedule', 'manual', 'chat']))
const triggerFilterSchema = z.array(z.enum(ALL_TRIGGER_TYPES))
const alertRuleSchema = z.enum([
'consecutive_failures',

View File

@@ -7,6 +7,7 @@ import { z } from 'zod'
import { getSession } from '@/lib/auth'
import { encryptSecret } from '@/lib/core/security/encryption'
import { createLogger } from '@/lib/logs/console/logger'
import { ALL_TRIGGER_TYPES } from '@/lib/logs/types'
import { getUserEntityPermissions } from '@/lib/workspaces/permissions/utils'
import { MAX_EMAIL_RECIPIENTS, MAX_NOTIFICATIONS_PER_TYPE, MAX_WORKFLOW_IDS } from './constants'
@@ -14,7 +15,7 @@ const logger = createLogger('WorkspaceNotificationsAPI')
const notificationTypeSchema = z.enum(['webhook', 'email', 'slack'])
const levelFilterSchema = z.array(z.enum(['info', 'error']))
const triggerFilterSchema = z.array(z.enum(['api', 'webhook', 'schedule', 'manual', 'chat']))
const triggerFilterSchema = z.array(z.enum(ALL_TRIGGER_TYPES))
const alertRuleSchema = z.enum([
'consecutive_failures',
@@ -80,7 +81,7 @@ const createNotificationSchema = z
workflowIds: z.array(z.string()).max(MAX_WORKFLOW_IDS).default([]),
allWorkflows: z.boolean().default(false),
levelFilter: levelFilterSchema.default(['info', 'error']),
triggerFilter: triggerFilterSchema.default(['api', 'webhook', 'schedule', 'manual', 'chat']),
triggerFilter: triggerFilterSchema.default([...ALL_TRIGGER_TYPES]),
includeFinalOutput: z.boolean().default(false),
includeTraceSpans: z.boolean().default(false),
includeRateLimits: z.boolean().default(false),

View File

@@ -104,6 +104,8 @@ export function SlackChannelSelector({
disabled={disabled || channels.length === 0}
isLoading={isLoading}
error={fetchError}
searchable
searchPlaceholder='Search channels...'
/>
{selectedChannel && !fetchError && (
<p className='text-[12px] text-[var(--text-muted)]'>

View File

@@ -22,6 +22,7 @@ import { SlackIcon } from '@/components/icons'
import { Skeleton } from '@/components/ui'
import { cn } from '@/lib/core/utils/cn'
import { createLogger } from '@/lib/logs/console/logger'
import { ALL_TRIGGER_TYPES, type TriggerType } from '@/lib/logs/types'
import { quickValidateEmail } from '@/lib/messaging/email/validation'
import {
type NotificationSubscription,
@@ -43,7 +44,6 @@ const PRIMARY_BUTTON_STYLES =
type NotificationType = 'webhook' | 'email' | 'slack'
type LogLevel = 'info' | 'error'
type TriggerType = 'api' | 'webhook' | 'schedule' | 'manual' | 'chat'
type AlertRule =
| 'none'
| 'consecutive_failures'
@@ -84,7 +84,6 @@ interface NotificationSettingsProps {
}
const LOG_LEVELS: LogLevel[] = ['info', 'error']
const TRIGGER_TYPES: TriggerType[] = ['api', 'webhook', 'schedule', 'manual', 'chat']
function formatAlertConfigLabel(config: {
rule: AlertRule
@@ -137,7 +136,7 @@ export function NotificationSettings({
workflowIds: [] as string[],
allWorkflows: true,
levelFilter: ['info', 'error'] as LogLevel[],
triggerFilter: ['api', 'webhook', 'schedule', 'manual', 'chat'] as TriggerType[],
triggerFilter: [...ALL_TRIGGER_TYPES] as TriggerType[],
includeFinalOutput: false,
includeTraceSpans: false,
includeRateLimits: false,
@@ -207,7 +206,7 @@ export function NotificationSettings({
workflowIds: [],
allWorkflows: true,
levelFilter: ['info', 'error'],
triggerFilter: ['api', 'webhook', 'schedule', 'manual', 'chat'],
triggerFilter: [...ALL_TRIGGER_TYPES],
includeFinalOutput: false,
includeTraceSpans: false,
includeRateLimits: false,
@@ -768,7 +767,7 @@ export function NotificationSettings({
<Combobox
options={slackAccounts.map((acc) => ({
value: acc.id,
label: acc.accountId,
label: acc.displayName || 'Slack Workspace',
}))}
value={formData.slackAccountId}
onChange={(value) => {
@@ -859,7 +858,7 @@ export function NotificationSettings({
<div className='flex flex-col gap-[8px]'>
<Label className='text-[var(--text-secondary)]'>Trigger Type Filters</Label>
<Combobox
options={TRIGGER_TYPES.map((trigger) => ({
options={ALL_TRIGGER_TYPES.map((trigger) => ({
label: trigger.charAt(0).toUpperCase() + trigger.slice(1),
value: trigger,
}))}

View File

@@ -2,11 +2,9 @@
import { useEffect, useMemo, useRef, useState } from 'react'
import { Search, X } from 'lucide-react'
import { useParams } from 'next/navigation'
import { Button, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { Badge, Popover, PopoverAnchor, PopoverContent } from '@/components/emcn'
import { cn } from '@/lib/core/utils/cn'
import { createLogger } from '@/lib/logs/console/logger'
import { getIntegrationMetadata } from '@/lib/logs/get-trigger-options'
import { getTriggerOptions } from '@/lib/logs/get-trigger-options'
import { type ParsedFilter, parseQuery } from '@/lib/logs/query-parser'
import {
type FolderData,
@@ -18,7 +16,15 @@ import { useSearchState } from '@/app/workspace/[workspaceId]/logs/hooks/use-sea
import { useFolderStore } from '@/stores/folders/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
const logger = createLogger('AutocompleteSearch')
function truncateFilterValue(field: string, value: string): string {
if ((field === 'executionId' || field === 'workflowId') && value.length > 12) {
return `...${value.slice(-6)}`
}
if (value.length > 20) {
return `${value.slice(0, 17)}...`
}
return value
}
interface AutocompleteSearchProps {
value: string
@@ -35,11 +41,8 @@ export function AutocompleteSearch({
className,
onOpenChange,
}: AutocompleteSearchProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
const workflows = useWorkflowRegistry((state) => state.workflows)
const folders = useFolderStore((state) => state.folders)
const [triggersData, setTriggersData] = useState<TriggerData[]>([])
const workflowsData = useMemo<WorkflowData[]>(() => {
return Object.values(workflows).map((w) => ({
@@ -56,32 +59,13 @@ export function AutocompleteSearch({
}))
}, [folders])
useEffect(() => {
if (!workspaceId) return
const fetchTriggers = async () => {
try {
const response = await fetch(`/api/logs/triggers?workspaceId=${workspaceId}`)
if (!response.ok) return
const data = await response.json()
const triggers: TriggerData[] = data.triggers.map((trigger: string) => {
const metadata = getIntegrationMetadata(trigger)
return {
value: trigger,
label: metadata.label,
color: metadata.color,
}
})
setTriggersData(triggers)
} catch (error) {
logger.error('Failed to fetch triggers:', error)
}
}
fetchTriggers()
}, [workspaceId])
const triggersData = useMemo<TriggerData[]>(() => {
return getTriggerOptions().map((t) => ({
value: t.value,
label: t.label,
color: t.color,
}))
}, [])
const suggestionEngine = useMemo(() => {
return new SearchSuggestions(workflowsData, foldersData, triggersData)
@@ -103,7 +87,6 @@ export function AutocompleteSearch({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
inputRef,
dropdownRef,
handleInputChange,
@@ -122,7 +105,6 @@ export function AutocompleteSearch({
const lastExternalValue = useRef(value)
useEffect(() => {
// Only re-initialize if value changed externally (not from user typing)
if (value !== lastExternalValue.current) {
lastExternalValue.current = value
const parsed = parseQuery(value)
@@ -130,7 +112,6 @@ export function AutocompleteSearch({
}
}, [value, initializeFromQuery])
// Initial sync on mount
useEffect(() => {
if (value) {
const parsed = parseQuery(value)
@@ -189,40 +170,49 @@ export function AutocompleteSearch({
<div className='flex flex-1 items-center gap-[6px] overflow-x-auto pr-[6px] [scrollbar-width:none] [&::-webkit-scrollbar]:hidden'>
{/* Applied Filter Badges */}
{appliedFilters.map((filter, index) => (
<Button
<Badge
key={`${filter.field}-${filter.value}-${index}`}
variant='outline'
className={cn(
'h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]',
highlightedBadgeIndex === index && 'border'
)}
onClick={(e) => {
e.preventDefault()
removeBadge(index)
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => removeBadge(index)}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
removeBadge(index)
}
}}
>
<span className='text-[var(--text-muted)]'>{filter.field}:</span>
<span className='text-[var(--text-primary)]'>
{filter.operator !== '=' && filter.operator}
{filter.originalValue}
{truncateFilterValue(filter.field, filter.originalValue)}
</span>
<X className='h-3 w-3' />
</Button>
<X className='h-3 w-3 shrink-0' />
</Badge>
))}
{/* Text Search Badge (if present) */}
{hasTextSearch && (
<Button
<Badge
variant='outline'
className='h-6 flex-shrink-0 gap-1 rounded-[6px] px-2 text-[11px]'
onClick={(e) => {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
role='button'
tabIndex={0}
className='h-6 shrink-0 cursor-pointer whitespace-nowrap rounded-md px-2 text-[11px]'
onClick={() => handleFiltersChange(appliedFilters, '')}
onKeyDown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault()
handleFiltersChange(appliedFilters, '')
}
}}
>
<span className='text-[var(--text-primary)]'>"{textSearch}"</span>
<X className='h-3 w-3' />
</Button>
<span className='max-w-[150px] truncate text-[var(--text-primary)]'>
"{textSearch}"
</span>
<X className='h-3 w-3 shrink-0' />
</Badge>
)}
{/* Input - only current typing */}
@@ -261,9 +251,8 @@ export function AutocompleteSearch({
sideOffset={4}
onOpenAutoFocus={(e) => e.preventDefault()}
>
<div className='max-h-96 overflow-y-auto'>
<div className='max-h-96 overflow-y-auto px-1'>
{sections.length > 0 ? (
// Multi-section layout
<div className='py-1'>
{/* Show all results (no header) */}
{suggestions[0]?.category === 'show-all' && (
@@ -271,9 +260,9 @@ export function AutocompleteSearch({
key={suggestions[0].id}
data-index={0}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
highlightedIndex === 0 && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(0)}
onMouseDown={(e) => {
@@ -287,7 +276,7 @@ export function AutocompleteSearch({
{sections.map((section) => (
<div key={section.title}>
<div className='border-[var(--divider)] border-t px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
{section.title}
</div>
{section.suggestions.map((suggestion) => {
@@ -301,9 +290,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
isHighlighted && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -312,19 +301,11 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
<div className='min-w-0 flex-1 truncate text-[13px]'>
{suggestion.label}
</div>
{suggestion.value !== suggestion.label && (
<div className='flex-shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 font-mono text-[11px] text-[var(--text-muted)]'>
{suggestion.category === 'workflow' ||
suggestion.category === 'folder'
? `${suggestion.category}:`
@@ -342,7 +323,7 @@ export function AutocompleteSearch({
// Single section layout
<div className='py-1'>
{suggestionType === 'filters' && (
<div className='border-[var(--divider)] border-b px-3 py-1.5 font-medium text-[11px] text-[var(--text-tertiary)] uppercase tracking-wide'>
<div className='px-3 py-1.5 font-medium text-[12px] text-[var(--text-tertiary)] uppercase tracking-wide'>
SUGGESTED FILTERS
</div>
)}
@@ -352,10 +333,9 @@ export function AutocompleteSearch({
key={suggestion.id}
data-index={index}
className={cn(
'w-full px-3 py-1.5 text-left transition-colors focus:outline-none',
'hover:bg-[var(--surface-9)] dark:hover:bg-[var(--surface-9)]',
index === highlightedIndex &&
'bg-[var(--surface-9)] dark:bg-[var(--surface-9)]'
'w-full rounded-[6px] px-3 py-2 text-left transition-colors focus-visible:outline-none focus-visible:ring-1 focus-visible:ring-[var(--border-focus)]',
'hover:bg-[var(--surface-9)]',
index === highlightedIndex && 'bg-[var(--surface-9)]'
)}
onMouseEnter={() => setHighlightedIndex(index)}
onMouseDown={(e) => {
@@ -364,17 +344,9 @@ export function AutocompleteSearch({
}}
>
<div className='flex items-center justify-between gap-3'>
<div className='flex min-w-0 flex-1 items-center gap-2'>
{suggestion.category === 'trigger' && suggestion.color && (
<div
className='h-2 w-2 flex-shrink-0 rounded-full'
style={{ backgroundColor: suggestion.color }}
/>
)}
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
</div>
<div className='min-w-0 flex-1 text-[13px]'>{suggestion.label}</div>
{suggestion.description && (
<div className='flex-shrink-0 text-[11px] text-[var(--text-muted)]'>
<div className='shrink-0 text-[11px] text-[var(--text-muted)]'>
{suggestion.value}
</div>
)}

View File

@@ -21,21 +21,15 @@ export function useSearchState({
const [currentInput, setCurrentInput] = useState('')
const [textSearch, setTextSearch] = useState('')
// Dropdown state
const [isOpen, setIsOpen] = useState(false)
const [suggestions, setSuggestions] = useState<Suggestion[]>([])
const [sections, setSections] = useState<SuggestionSection[]>([])
const [highlightedIndex, setHighlightedIndex] = useState(-1)
// Badge interaction
const [highlightedBadgeIndex, setHighlightedBadgeIndex] = useState<number | null>(null)
// Refs
const inputRef = useRef<HTMLInputElement>(null)
const dropdownRef = useRef<HTMLDivElement>(null)
const debounceRef = useRef<NodeJS.Timeout | null>(null)
// Update suggestions when input changes
const updateSuggestions = useCallback(
(input: string) => {
const suggestionGroup = getSuggestions(input)
@@ -55,13 +49,10 @@ export function useSearchState({
[getSuggestions]
)
// Handle input changes
const handleInputChange = useCallback(
(value: string) => {
setCurrentInput(value)
setHighlightedBadgeIndex(null) // Clear badge highlight on any input
// Debounce suggestion updates
if (debounceRef.current) {
clearTimeout(debounceRef.current)
}
@@ -73,11 +64,9 @@ export function useSearchState({
[updateSuggestions, debounceMs]
)
// Handle suggestion selection
const handleSuggestionSelect = useCallback(
(suggestion: Suggestion) => {
if (suggestion.category === 'show-all') {
// Treat as text search
setTextSearch(suggestion.value)
setCurrentInput('')
setIsOpen(false)
@@ -85,15 +74,12 @@ export function useSearchState({
return
}
// Check if this is a filter-key suggestion (ends with ':')
if (suggestion.category === 'filters' && suggestion.value.endsWith(':')) {
// Set input to the filter key and keep dropdown open for values
setCurrentInput(suggestion.value)
updateSuggestions(suggestion.value)
return
}
// For filter values, workflows, folders - add as a filter
const newFilter: ParsedFilter = {
field: suggestion.value.split(':')[0] as any,
operator: '=',
@@ -110,15 +96,12 @@ export function useSearchState({
setCurrentInput('')
setTextSearch('')
// Notify parent
onFiltersChange(updatedFilters, '')
// Focus back on input and reopen dropdown with empty suggestions
if (inputRef.current) {
inputRef.current.focus()
}
// Show filter keys dropdown again after selection
setTimeout(() => {
updateSuggestions('')
}, 50)
@@ -126,12 +109,10 @@ export function useSearchState({
[appliedFilters, onFiltersChange, updateSuggestions]
)
// Remove a badge
const removeBadge = useCallback(
(index: number) => {
const updatedFilters = appliedFilters.filter((_, i) => i !== index)
setAppliedFilters(updatedFilters)
setHighlightedBadgeIndex(null)
onFiltersChange(updatedFilters, textSearch)
if (inputRef.current) {
@@ -141,39 +122,22 @@ export function useSearchState({
[appliedFilters, textSearch, onFiltersChange]
)
// Handle keyboard navigation
const handleKeyDown = useCallback(
(event: React.KeyboardEvent) => {
// Backspace on empty input - badge deletion
if (event.key === 'Backspace' && currentInput === '') {
event.preventDefault()
if (highlightedBadgeIndex !== null) {
// Delete highlighted badge
removeBadge(highlightedBadgeIndex)
} else if (appliedFilters.length > 0) {
// Highlight last badge
setHighlightedBadgeIndex(appliedFilters.length - 1)
if (appliedFilters.length > 0) {
event.preventDefault()
removeBadge(appliedFilters.length - 1)
}
return
}
// Clear badge highlight on any other key when not in dropdown navigation
if (
highlightedBadgeIndex !== null &&
!['ArrowDown', 'ArrowUp', 'Enter'].includes(event.key)
) {
setHighlightedBadgeIndex(null)
}
// Enter key
if (event.key === 'Enter') {
event.preventDefault()
if (isOpen && highlightedIndex >= 0 && suggestions[highlightedIndex]) {
handleSuggestionSelect(suggestions[highlightedIndex])
} else if (currentInput.trim()) {
// Submit current input as text search
setTextSearch(currentInput.trim())
setCurrentInput('')
setIsOpen(false)
@@ -182,7 +146,6 @@ export function useSearchState({
return
}
// Dropdown navigation
if (!isOpen) return
switch (event.key) {
@@ -216,7 +179,6 @@ export function useSearchState({
},
[
currentInput,
highlightedBadgeIndex,
appliedFilters,
isOpen,
highlightedIndex,
@@ -227,12 +189,10 @@ export function useSearchState({
]
)
// Handle focus
const handleFocus = useCallback(() => {
updateSuggestions(currentInput)
}, [currentInput, updateSuggestions])
// Handle blur
const handleBlur = useCallback(() => {
setTimeout(() => {
setIsOpen(false)
@@ -240,7 +200,6 @@ export function useSearchState({
}, 150)
}, [])
// Clear all filters
const clearAll = useCallback(() => {
setAppliedFilters([])
setCurrentInput('')
@@ -253,7 +212,6 @@ export function useSearchState({
}
}, [onFiltersChange])
// Initialize from external value (URL params, etc.)
const initializeFromQuery = useCallback((query: string, filters: ParsedFilter[]) => {
setAppliedFilters(filters)
setTextSearch(query)
@@ -261,7 +219,6 @@ export function useSearchState({
}, [])
return {
// State
appliedFilters,
currentInput,
textSearch,
@@ -269,13 +226,10 @@ export function useSearchState({
suggestions,
sections,
highlightedIndex,
highlightedBadgeIndex,
// Refs
inputRef,
dropdownRef,
// Handlers
handleInputChange,
handleSuggestionSelect,
handleKeyDown,
@@ -285,7 +239,6 @@ export function useSearchState({
clearAll,
initializeFromQuery,
// Setters for external control
setHighlightedIndex,
}
}
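The hook above drops the two-step badge deletion (a first Backspace used to highlight the last badge, a second one deleted it) together with the `highlightedBadgeIndex` state. A minimal sketch of the resulting behavior, reusing the hook's own names:

```ts
// Sketch of the simplified Backspace branch in handleKeyDown above.
function handleEmptyInputBackspace(
  event: { key: string; preventDefault: () => void },
  currentInput: string,
  appliedFilters: unknown[],
  removeBadge: (index: number) => void
) {
  if (event.key !== 'Backspace' || currentInput !== '') return
  // With the input empty, one keystroke now removes the most recently applied
  // filter; previously the first press only highlighted it.
  if (appliedFilters.length > 0) {
    event.preventDefault()
    removeBadge(appliedFilters.length - 1)
  }
}
```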

View File

@@ -101,6 +101,9 @@ const ACTION_VERBS = [
'Generated',
'Rendering',
'Rendered',
'Sleeping',
'Slept',
'Resumed',
] as const
/**
@@ -580,6 +583,11 @@ export function ToolCall({ toolCall: toolCallProp, toolCallId, onStateChange }:
(toolCall.state === (ClientToolCallState.executing as any) ||
toolCall.state === ('executing' as any))
const showWake =
toolCall.name === 'sleep' &&
(toolCall.state === (ClientToolCallState.executing as any) ||
toolCall.state === ('executing' as any))
const handleStateChange = (state: any) => {
forceUpdate({})
onStateChange?.(state)
@@ -1102,6 +1110,37 @@ export function ToolCall({ toolCall: toolCallProp, toolCallId, onStateChange }:
Move to Background
</Button>
</div>
) : showWake ? (
<div className='mt-[8px]'>
<Button
onClick={async () => {
try {
const instance = getClientTool(toolCall.id)
// Get elapsed seconds before waking
const elapsedSeconds = instance?.getElapsedSeconds?.() || 0
// Transition to background state locally so UI updates immediately
// Pass elapsed seconds in the result so dynamic text can use it
instance?.setState?.((ClientToolCallState as any).background, {
result: { _elapsedSeconds: elapsedSeconds },
})
// Update the tool call params in the store to include elapsed time for display
const { updateToolCallParams } = useCopilotStore.getState()
updateToolCallParams?.(toolCall.id, { _elapsedSeconds: Math.round(elapsedSeconds) })
await instance?.markToolComplete?.(
200,
`User woke you up after ${Math.round(elapsedSeconds)} seconds`
)
// Optionally force a re-render; store should sync state from server
forceUpdate({})
onStateChange?.('background')
} catch {}
}}
variant='primary'
title='Wake'
>
Wake
</Button>
</div>
) : null}
</div>
)
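The Wake button reads the elapsed sleep time from the client tool instance (`getClientTool(toolCall.id)`), moves the tool to the background state locally, and then reports completion via `markToolComplete`. The `sleep` client tool itself is not shown in this compare; the fragment below is only an assumption of how `getElapsedSeconds` could be tracked from a start timestamp, not the project's implementation:

```ts
// Hypothetical elapsed-time tracking for the sleep client tool (assumption, not source).
class SleepClientTool {
  private startedAt = Date.now()

  /** Seconds since the sleep began; consumed by the Wake button above. */
  getElapsedSeconds(): number {
    return (Date.now() - this.startedAt) / 1000
  }
}
```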

View File

@@ -1,5 +1,6 @@
'use client'
import { useMemo } from 'react'
import { Check } from 'lucide-react'
import { Button, Modal, ModalBody, ModalContent, ModalFooter, ModalHeader } from '@/components/emcn'
import { client } from '@/lib/auth/auth-client'
@@ -315,14 +316,28 @@ export function OAuthRequiredModal({
}
}
const displayScopes = requiredScopes.filter(
(scope) => !scope.includes('userinfo.email') && !scope.includes('userinfo.profile')
const newScopesSet = useMemo(
() =>
new Set(
(newScopes || []).filter(
(scope) => !scope.includes('userinfo.email') && !scope.includes('userinfo.profile')
)
),
[newScopes]
)
const newScopesSet = new Set(
(newScopes || []).filter(
const displayScopes = useMemo(() => {
const filtered = requiredScopes.filter(
(scope) => !scope.includes('userinfo.email') && !scope.includes('userinfo.profile')
)
)
return filtered.sort((a, b) => {
const aIsNew = newScopesSet.has(a)
const bIsNew = newScopesSet.has(b)
if (aIsNew && !bIsNew) return -1
if (!aIsNew && bIsNew) return 1
return 0
})
}, [requiredScopes, newScopesSet])
const handleConnectDirectly = async () => {
try {
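`displayScopes` is now memoized and sorted so that newly requested scopes appear before scopes the user has already granted; since `Array.prototype.sort` is stable, each group keeps its original order. A small worked example of the comparator with made-up scope names:

```ts
const newScopesSet = new Set(['calendar.readonly'])
const requiredScopes = ['drive.file', 'calendar.readonly', 'gmail.send']

const sorted = [...requiredScopes].sort((a, b) => {
  const aIsNew = newScopesSet.has(a)
  const bIsNew = newScopesSet.has(b)
  if (aIsNew && !bIsNew) return -1
  if (!aIsNew && bIsNew) return 1
  return 0
})
// => ['calendar.readonly', 'drive.file', 'gmail.send'] — the new scope is listed first.
```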

View File

@@ -12,6 +12,7 @@ import {
parseProvider,
} from '@/lib/oauth'
import { OAuthRequiredModal } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/credential-selector/components/oauth-required-modal'
import { useDependsOnGate } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/hooks/use-depends-on-gate'
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/hooks/use-sub-block-value'
import type { SubBlockConfig } from '@/blocks/types'
import { useOAuthCredentialDetail, useOAuthCredentials } from '@/hooks/queries/oauth-credentials'
@@ -45,10 +46,14 @@ export function CredentialSelector({
const label = subBlock.placeholder || 'Select credential'
const serviceId = subBlock.serviceId || ''
const { depsSatisfied, dependsOn } = useDependsOnGate(blockId, subBlock, { disabled, isPreview })
const hasDependencies = dependsOn.length > 0
const effectiveDisabled = disabled || (hasDependencies && !depsSatisfied)
const effectiveValue = isPreview && previewValue !== undefined ? previewValue : storeValue
const selectedId = typeof effectiveValue === 'string' ? effectiveValue : ''
// serviceId is now the canonical identifier - derive provider from it
const effectiveProviderId = useMemo(
() => getProviderIdFromServiceId(serviceId) as OAuthProvider,
[serviceId]
@@ -130,7 +135,7 @@ export function CredentialSelector({
const needsUpdate =
hasSelection &&
missingRequiredScopes.length > 0 &&
!disabled &&
!effectiveDisabled &&
!isPreview &&
!credentialsLoading
@@ -230,8 +235,10 @@ export function CredentialSelector({
selectedValue={selectedId}
onChange={handleComboboxChange}
onOpenChange={handleOpenChange}
placeholder={label}
disabled={disabled}
placeholder={
hasDependencies && !depsSatisfied ? 'Fill in required fields above first' : label
}
disabled={effectiveDisabled}
editable={true}
filterOptions={true}
isLoading={credentialsLoading}
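The selector is now gated on `useDependsOnGate`, whose source is not included in this compare. Judging from its return values (`depsSatisfied`, `dependsOn`) and the `dependsOn: ['model']` configs used elsewhere in this change set, a plausible reading is that it checks whether every subblock listed in the config's `dependsOn` already has a value. The sketch below captures only that assumption and ignores the `{ disabled, isPreview }` options the real hook receives:

```ts
import { useShallow } from 'zustand/react/shallow'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'

// Assumed behavior — not the project's implementation.
function useDependsOnGate(blockId: string, subBlock: { dependsOn?: string[] }) {
  const dependsOn = subBlock.dependsOn ?? []
  const values = useSubBlockStore(
    useShallow((state) => dependsOn.map((id) => state.getValue(blockId, id)))
  )
  const depsSatisfied = values.every((v) => v !== undefined && v !== null && v !== '')
  return { depsSatisfied, dependsOn }
}
```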

View File

@@ -90,6 +90,7 @@ export function ShortInput({
blockId,
triggerId: undefined,
isPreview,
useWebhookUrl,
})
const wandHook = useWand({

View File

@@ -91,8 +91,7 @@ export function FieldFormat({
placeholder = 'fieldName',
showType = true,
showValue = false,
valuePlaceholder = 'Enter test value',
config,
valuePlaceholder = 'Enter default value',
}: FieldFormatProps) {
const [storeValue, setStoreValue] = useSubBlockValue<Field[]>(blockId, subBlockId)
const valueInputRefs = useRef<Record<string, HTMLInputElement | HTMLTextAreaElement>>({})
@@ -454,7 +453,6 @@ export function FieldFormat({
)
}
// Export specific components for backward compatibility
export function InputFormat(props: Omit<FieldFormatProps, 'title' | 'placeholder'>) {
return <FieldFormat {...props} title='Input' placeholder='firstName' />
}

View File

@@ -18,12 +18,18 @@ interface McpTool {
inputSchema?: any
}
interface McpServer {
id: string
url?: string
}
interface StoredTool {
type: 'mcp'
title: string
toolId: string
params: {
serverId: string
serverUrl?: string
toolName: string
serverName: string
}
@@ -34,6 +40,7 @@ interface StoredTool {
interface McpToolsListProps {
mcpTools: McpTool[]
mcpServers?: McpServer[]
searchQuery: string
customFilter: (name: string, query: string) => number
onToolSelect: (tool: StoredTool) => void
@@ -45,6 +52,7 @@ interface McpToolsListProps {
*/
export function McpToolsList({
mcpTools,
mcpServers = [],
searchQuery,
customFilter,
onToolSelect,
@@ -59,44 +67,48 @@ export function McpToolsList({
return (
<>
<PopoverSection>MCP Tools</PopoverSection>
{filteredTools.map((mcpTool) => (
<ToolCommand.Item
key={mcpTool.id}
value={mcpTool.name}
onSelect={() => {
if (disabled) return
{filteredTools.map((mcpTool) => {
const server = mcpServers.find((s) => s.id === mcpTool.serverId)
return (
<ToolCommand.Item
key={mcpTool.id}
value={mcpTool.name}
onSelect={() => {
if (disabled) return
const newTool: StoredTool = {
type: 'mcp',
title: mcpTool.name,
toolId: mcpTool.id,
params: {
serverId: mcpTool.serverId,
toolName: mcpTool.name,
serverName: mcpTool.serverName,
},
isExpanded: true,
usageControl: 'auto',
schema: {
...mcpTool.inputSchema,
description: mcpTool.description,
},
}
const newTool: StoredTool = {
type: 'mcp',
title: mcpTool.name,
toolId: mcpTool.id,
params: {
serverId: mcpTool.serverId,
serverUrl: server?.url,
toolName: mcpTool.name,
serverName: mcpTool.serverName,
},
isExpanded: true,
usageControl: 'auto',
schema: {
...mcpTool.inputSchema,
description: mcpTool.description,
},
}
onToolSelect(newTool)
}}
>
<div
className='flex h-[15px] w-[15px] flex-shrink-0 items-center justify-center rounded'
style={{ background: mcpTool.bgColor }}
onToolSelect(newTool)
}}
>
<IconComponent icon={mcpTool.icon} className='h-[11px] w-[11px] text-white' />
</div>
<span className='truncate' title={`${mcpTool.name} (${mcpTool.serverName})`}>
{mcpTool.name}
</span>
</ToolCommand.Item>
))}
<div
className='flex h-[15px] w-[15px] flex-shrink-0 items-center justify-center rounded'
style={{ background: mcpTool.bgColor }}
>
<IconComponent icon={mcpTool.icon} className='h-[11px] w-[11px] text-white' />
</div>
<span className='truncate' title={`${mcpTool.name} (${mcpTool.serverName})`}>
{mcpTool.name}
</span>
</ToolCommand.Item>
)
})}
</>
)
}

View File

@@ -4,6 +4,7 @@ import { useQuery } from '@tanstack/react-query'
import { Loader2, PlusIcon, WrenchIcon, XIcon } from 'lucide-react'
import { useParams } from 'next/navigation'
import {
Badge,
Combobox,
Popover,
PopoverContent,
@@ -12,6 +13,7 @@ import {
PopoverSearch,
PopoverSection,
PopoverTrigger,
Tooltip,
} from '@/components/emcn'
import { McpIcon } from '@/components/icons'
import { Switch } from '@/components/ui/switch'
@@ -55,9 +57,11 @@ import {
type CustomTool as CustomToolDefinition,
useCustomTools,
} from '@/hooks/queries/custom-tools'
import { useMcpServers } from '@/hooks/queries/mcp'
import { useWorkflows } from '@/hooks/queries/workflows'
import { useMcpTools } from '@/hooks/use-mcp-tools'
import { getProviderFromModel, supportsToolUsageControl } from '@/providers/utils'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import {
formatParameterLabel,
@@ -802,6 +806,66 @@ export function ToolInput({
refreshTools,
} = useMcpTools(workspaceId)
const { data: mcpServers = [], isLoading: mcpServersLoading } = useMcpServers(workspaceId)
const openSettingsModal = useSettingsModalStore((state) => state.openModal)
const mcpDataLoading = mcpLoading || mcpServersLoading
/**
* Returns issue info for an MCP tool using shared validation logic.
*/
const getMcpToolIssue = useCallback(
(tool: StoredTool) => {
if (tool.type !== 'mcp') return null
const { getMcpToolIssue: validateTool } = require('@/lib/mcp/tool-validation')
return validateTool(
{
serverId: tool.params?.serverId as string,
serverUrl: tool.params?.serverUrl as string | undefined,
toolName: tool.params?.toolName as string,
schema: tool.schema,
},
mcpServers.map((s) => ({
id: s.id,
url: s.url,
connectionStatus: s.connectionStatus,
lastError: s.lastError,
})),
mcpTools.map((t) => ({
serverId: t.serverId,
name: t.name,
inputSchema: t.inputSchema,
}))
)
},
[mcpTools, mcpServers]
)
const isMcpToolUnavailable = useCallback(
(tool: StoredTool): boolean => {
const { isToolUnavailable } = require('@/lib/mcp/tool-validation')
return isToolUnavailable(getMcpToolIssue(tool))
},
[getMcpToolIssue]
)
const hasMcpToolIssue = useCallback(
(tool: StoredTool): boolean => {
return getMcpToolIssue(tool) !== null
},
[getMcpToolIssue]
)
// Filter out MCP tools from unavailable servers for the dropdown
const availableMcpTools = useMemo(() => {
return mcpTools.filter((mcpTool) => {
const server = mcpServers.find((s) => s.id === mcpTool.serverId)
// Only include tools from connected servers
return server && server.connectionStatus === 'connected'
})
}, [mcpTools, mcpServers])
// Reset search query when popover opens
useEffect(() => {
if (open) {
@@ -1849,9 +1913,10 @@ export function ToolInput({
)
})()}
{/* Display MCP tools */}
{/* Display MCP tools (only from available servers) */}
<McpToolsList
mcpTools={mcpTools}
mcpTools={availableMcpTools}
mcpServers={mcpServers}
searchQuery={searchQuery || ''}
customFilter={customFilter}
onToolSelect={handleMcpToolSelect}
@@ -2040,9 +2105,46 @@ export function ToolInput({
<span className='truncate font-medium text-[13px] text-[var(--text-primary)]'>
{isCustomTool ? customToolTitle : tool.title}
</span>
{isMcpTool &&
!mcpDataLoading &&
(() => {
const issue = getMcpToolIssue(tool)
if (!issue) return null
const { getIssueBadgeLabel } = require('@/lib/mcp/tool-validation')
const serverId = tool.params?.serverId
return (
<div
onClick={(e: React.MouseEvent) => {
e.stopPropagation()
e.preventDefault()
openSettingsModal({ section: 'mcp', mcpServerId: serverId })
}}
>
<Tooltip.Root>
<Tooltip.Trigger asChild>
<Badge
variant='outline'
className='cursor-pointer transition-colors hover:bg-[var(--warning)]/10'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
>
{getIssueBadgeLabel(issue)}
</Badge>
</Tooltip.Trigger>
<Tooltip.Content>
<span className='text-sm'>
{issue.message} · Click to open settings
</span>
</Tooltip.Content>
</Tooltip.Root>
</div>
)
})()}
</div>
<div className='flex flex-shrink-0 items-center gap-[8px]'>
{supportsToolControl && (
{supportsToolControl && !(isMcpTool && isMcpToolUnavailable(tool)) && (
<Popover
open={usageControlPopoverIndex === toolIndex}
onOpenChange={(open) =>
@@ -2386,9 +2488,10 @@ export function ToolInput({
)
})()}
{/* Display MCP tools */}
{/* Display MCP tools (only from available servers) */}
<McpToolsList
mcpTools={mcpTools}
mcpTools={availableMcpTools}
mcpServers={mcpServers}
searchQuery={searchQuery || ''}
customFilter={customFilter}
onToolSelect={handleMcpToolSelect}
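The warning badge above delegates to `@/lib/mcp/tool-validation`, which is not part of this compare. Reconstructed purely from the call sites (`getMcpToolIssue`, `isToolUnavailable`, `getIssueBadgeLabel`, and an issue object carrying a `message`), a plausible shape is sketched below; the specific issue kinds, checks, and labels are assumptions:

```ts
// Assumed shape of lib/mcp/tool-validation — reconstructed from call sites, not from source.
type McpToolIssue = {
  type: 'server-missing' | 'server-error' | 'tool-missing'
  message: string
}

function getMcpToolIssue(
  stored: { serverId: string; serverUrl?: string; toolName: string; schema?: unknown },
  servers: { id: string; url?: string; connectionStatus?: string; lastError?: string }[],
  discovered: { serverId: string; name: string; inputSchema?: unknown }[]
): McpToolIssue | null {
  const server = servers.find((s) => s.id === stored.serverId)
  if (!server) return { type: 'server-missing', message: 'Server no longer exists' }
  if (server.connectionStatus !== 'connected')
    return { type: 'server-error', message: server.lastError || 'Unable to connect' }
  const found = discovered.some((t) => t.serverId === stored.serverId && t.name === stored.toolName)
  if (!found) return { type: 'tool-missing', message: 'Tool not found on this server' }
  return null
}

// The real module may distinguish softer issues (e.g. schema drift) from hard unavailability.
const isToolUnavailable = (issue: McpToolIssue | null): boolean => issue !== null
const getIssueBadgeLabel = (issue: McpToolIssue): string =>
  issue.type === 'tool-missing' ? 'Missing' : 'Unavailable'
```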

View File

@@ -74,6 +74,7 @@ export function TriggerSave({
blockId,
triggerId: effectiveTriggerId,
isPreview,
useWebhookUrl: true, // store the webhook URL in the store
})
const triggerConfig = useSubBlockStore((state) => state.getValue(blockId, 'triggerConfig'))

View File

@@ -26,7 +26,7 @@ const SUBFLOW_CONFIG = {
},
typeKey: 'loopType' as const,
storeKey: 'loops' as const,
maxIterations: 100,
maxIterations: 1000,
configKeys: {
iterations: 'iterations' as const,
items: 'forEachItems' as const,

View File

@@ -1741,7 +1741,7 @@ export function Terminal() {
)}
{/* Content */}
<div className='flex-1 overflow-x-auto overflow-y-auto'>
<div className={clsx('flex-1 overflow-y-auto', !wrapText && 'overflow-x-auto')}>
{shouldShowCodeDisplay ? (
<OutputCodeContent
code={selectedEntry.input.code}

View File

@@ -40,6 +40,8 @@ import { useSelectorDisplayName } from '@/hooks/use-selector-display-name'
import { useVariablesStore } from '@/stores/panel/variables/store'
import { useWorkflowRegistry } from '@/stores/workflows/registry/store'
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
import { useWorkflowStore } from '@/stores/workflows/workflow/store'
import { wouldCreateCycle } from '@/stores/workflows/workflow/utils'
const logger = createLogger('WorkflowBlock')
@@ -844,7 +846,11 @@ export const WorkflowBlock = memo(function WorkflowBlock({
data-handleid='target'
isConnectableStart={false}
isConnectableEnd={true}
isValidConnection={(connection) => connection.source !== id}
isValidConnection={(connection) => {
if (connection.source === id) return false
const edges = useWorkflowStore.getState().edges
return !wouldCreateCycle(edges, connection.source!, connection.target!)
}}
/>
)}
@@ -1045,7 +1051,11 @@ export const WorkflowBlock = memo(function WorkflowBlock({
data-handleid={`condition-${cond.id}`}
isConnectableStart={true}
isConnectableEnd={false}
isValidConnection={(connection) => connection.target !== id}
isValidConnection={(connection) => {
if (connection.target === id) return false
const edges = useWorkflowStore.getState().edges
return !wouldCreateCycle(edges, connection.source!, connection.target!)
}}
/>
)
})}
@@ -1064,7 +1074,11 @@ export const WorkflowBlock = memo(function WorkflowBlock({
data-handleid='error'
isConnectableStart={true}
isConnectableEnd={false}
isValidConnection={(connection) => connection.target !== id}
isValidConnection={(connection) => {
if (connection.target === id) return false
const edges = useWorkflowStore.getState().edges
return !wouldCreateCycle(edges, connection.source!, connection.target!)
}}
/>
</>
)}
@@ -1081,7 +1095,11 @@ export const WorkflowBlock = memo(function WorkflowBlock({
data-handleid='source'
isConnectableStart={true}
isConnectableEnd={false}
isValidConnection={(connection) => connection.target !== id}
isValidConnection={(connection) => {
if (connection.target === id) return false
const edges = useWorkflowStore.getState().edges
return !wouldCreateCycle(edges, connection.source!, connection.target!)
}}
/>
{shouldShowDefaultHandles && (
@@ -1100,7 +1118,11 @@ export const WorkflowBlock = memo(function WorkflowBlock({
data-handleid='error'
isConnectableStart={true}
isConnectableEnd={false}
isValidConnection={(connection) => connection.target !== id}
isValidConnection={(connection) => {
if (connection.target === id) return false
const edges = useWorkflowStore.getState().edges
return !wouldCreateCycle(edges, connection.source!, connection.target!)
}}
/>
)}
</>
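All of the handle types above now funnel through the same guard: reject self-connections and reject any edge that would close a cycle. `wouldCreateCycle` lives in `@/stores/workflows/workflow/utils` and is not shown in this compare; the standard approach, sketched below as an assumption, is to test whether the proposed target can already reach the source through the existing edges:

```ts
// Sketch of a cycle check (assumption — the store's actual implementation is not shown).
// Adding source -> target closes a cycle iff target can already reach source.
function wouldCreateCycle(
  edges: { source: string; target: string }[],
  source: string,
  target: string
): boolean {
  const adjacency = new Map<string, string[]>()
  for (const edge of edges) {
    const next = adjacency.get(edge.source) ?? []
    next.push(edge.target)
    adjacency.set(edge.source, next)
  }
  const stack = [target]
  const visited = new Set<string>()
  while (stack.length > 0) {
    const node = stack.pop()!
    if (node === source) return true
    if (visited.has(node)) continue
    visited.add(node)
    for (const next of adjacency.get(node) ?? []) stack.push(next)
  }
  return false
}
```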

View File

@@ -6,6 +6,61 @@ import { getBlock } from '@/blocks/registry'
const logger = createLogger('NodeUtilities')
/**
* Estimates block dimensions based on block type.
* Uses subblock count to estimate height for blocks that haven't been measured yet.
*
* @param blockType - The type of block (e.g., 'condition', 'agent')
* @returns Estimated width and height for the block
*/
export function estimateBlockDimensions(blockType: string): { width: number; height: number } {
const blockConfig = getBlock(blockType)
const subBlockCount = blockConfig?.subBlocks?.length ?? 3
// Many subblocks are conditionally rendered (advanced mode, provider-specific, etc.)
// Use roughly half the config count as a reasonable estimate, capped between 3-7 rows
const estimatedRows = Math.max(3, Math.min(Math.ceil(subBlockCount / 2), 7))
const hasErrorRow = blockType !== 'starter' && blockType !== 'response' ? 1 : 0
const height =
BLOCK_DIMENSIONS.HEADER_HEIGHT +
BLOCK_DIMENSIONS.WORKFLOW_CONTENT_PADDING +
(estimatedRows + hasErrorRow) * BLOCK_DIMENSIONS.WORKFLOW_ROW_HEIGHT
return {
width: BLOCK_DIMENSIONS.FIXED_WIDTH,
height: Math.max(height, BLOCK_DIMENSIONS.MIN_HEIGHT),
}
}
/**
* Clamps a position to keep a block fully inside a container's content area.
* Content area starts after the header and padding, and ends before the right/bottom padding.
*
* @param position - Raw position relative to container origin
* @param containerDimensions - Container width and height
* @param blockDimensions - Block width and height
* @returns Clamped position that keeps block inside content area
*/
export function clampPositionToContainer(
position: { x: number; y: number },
containerDimensions: { width: number; height: number },
blockDimensions: { width: number; height: number }
): { x: number; y: number } {
const { width: containerWidth, height: containerHeight } = containerDimensions
const { width: blockWidth, height: blockHeight } = blockDimensions
// Content area bounds (where blocks can be placed)
const minX = CONTAINER_DIMENSIONS.LEFT_PADDING
const minY = CONTAINER_DIMENSIONS.HEADER_HEIGHT + CONTAINER_DIMENSIONS.TOP_PADDING
const maxX = containerWidth - CONTAINER_DIMENSIONS.RIGHT_PADDING - blockWidth
const maxY = containerHeight - CONTAINER_DIMENSIONS.BOTTOM_PADDING - blockHeight
return {
x: Math.max(minX, Math.min(position.x, Math.max(minX, maxX))),
y: Math.max(minY, Math.min(position.y, Math.max(minY, maxY))),
}
}
/**
* Hook providing utilities for node position, hierarchy, and dimension calculations
*/
@@ -21,7 +76,7 @@ export function useNodeUtilities(blocks: Record<string, any>) {
/**
* Get the dimensions of a block.
* For regular blocks, estimates height based on block config if not yet measured.
* For regular blocks, uses stored height or estimates based on block config.
*/
const getBlockDimensions = useCallback(
(blockId: string): { width: number; height: number } => {
@@ -41,32 +96,16 @@ export function useNodeUtilities(blocks: Record<string, any>) {
}
}
// Workflow block nodes have fixed visual width
const width = BLOCK_DIMENSIONS.FIXED_WIDTH
// Prefer deterministic height published by the block component; fallback to estimate
let height = block.height
if (!height) {
// Estimate height based on block config's subblock count for more accurate initial sizing
// This is critical for subflow containers to size correctly before child blocks are measured
const blockConfig = getBlock(block.type)
const subBlockCount = blockConfig?.subBlocks?.length ?? 3
// Many subblocks are conditionally rendered (advanced mode, provider-specific, etc.)
// Use roughly half the config count as a reasonable estimate, capped between 3-7 rows
const estimatedRows = Math.max(3, Math.min(Math.ceil(subBlockCount / 2), 7))
const hasErrorRow = block.type !== 'starter' && block.type !== 'response' ? 1 : 0
height =
BLOCK_DIMENSIONS.HEADER_HEIGHT +
BLOCK_DIMENSIONS.WORKFLOW_CONTENT_PADDING +
(estimatedRows + hasErrorRow) * BLOCK_DIMENSIONS.WORKFLOW_ROW_HEIGHT
if (block.height) {
return {
width: BLOCK_DIMENSIONS.FIXED_WIDTH,
height: Math.max(block.height, BLOCK_DIMENSIONS.MIN_HEIGHT),
}
}
return {
width,
height: Math.max(height, BLOCK_DIMENSIONS.MIN_HEIGHT),
}
// Use shared estimation utility for blocks without measured height
return estimateBlockDimensions(block.type)
},
[blocks, isContainerType]
)
@@ -164,29 +203,36 @@ export function useNodeUtilities(blocks: Record<string, any>) {
)
/**
* Calculates the relative position of a node to a new parent's content area.
* Accounts for header height and padding offsets in container nodes.
* Calculates the relative position of a node to a new parent's origin.
* React Flow positions children relative to parent origin, so we clamp
* to the content area bounds (after header and padding).
* @param nodeId ID of the node being repositioned
* @param newParentId ID of the new parent
* @returns Relative position coordinates {x, y} within the parent's content area
* @returns Relative position coordinates {x, y} within the parent
*/
const calculateRelativePosition = useCallback(
(nodeId: string, newParentId: string): { x: number; y: number } => {
const nodeAbsPos = getNodeAbsolutePosition(nodeId)
const parentAbsPos = getNodeAbsolutePosition(newParentId)
const parentNode = getNodes().find((n) => n.id === newParentId)
// Account for container's header and padding
// Children are positioned relative to content area, not container origin
const headerHeight = 50
const leftPadding = 16
const topPadding = 16
return {
x: nodeAbsPos.x - parentAbsPos.x - leftPadding,
y: nodeAbsPos.y - parentAbsPos.y - headerHeight - topPadding,
// Calculate raw relative position (relative to parent origin)
const rawPosition = {
x: nodeAbsPos.x - parentAbsPos.x,
y: nodeAbsPos.y - parentAbsPos.y,
}
// Get container and block dimensions
const containerDimensions = {
width: parentNode?.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
height: parentNode?.data?.height || CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
}
const blockDimensions = getBlockDimensions(nodeId)
// Clamp position to keep block inside content area
return clampPositionToContainer(rawPosition, containerDimensions, blockDimensions)
},
[getNodeAbsolutePosition]
[getNodeAbsolutePosition, getNodes, getBlockDimensions]
)
/**
@@ -252,23 +298,16 @@ export function useNodeUtilities(blocks: Record<string, any>) {
*/
const calculateLoopDimensions = useCallback(
(nodeId: string): { width: number; height: number } => {
const minWidth = CONTAINER_DIMENSIONS.DEFAULT_WIDTH
const minHeight = CONTAINER_DIMENSIONS.DEFAULT_HEIGHT
// Match styling in subflow-node.tsx:
// - Header section: 50px total height
// - Content area: px-[16px] pb-[0px] pt-[16px] pr-[70px]
// Left padding: 16px, Right padding: 64px, Top padding: 16px, Bottom padding: -6px (reduced by additional 6px from 0 to achieve 14px total reduction from original 8px)
// - Children are positioned relative to the content area (after header, inside padding)
const headerHeight = 50
const leftPadding = 16
const rightPadding = 80
const topPadding = 16
const bottomPadding = 16
const childNodes = getNodes().filter((node) => node.parentId === nodeId)
// Check both React Flow's node.parentId AND blocks store's data.parentId
// This ensures we catch children even if React Flow hasn't re-rendered yet
const childNodes = getNodes().filter(
(node) => node.parentId === nodeId || blocks[node.id]?.data?.parentId === nodeId
)
if (childNodes.length === 0) {
return { width: minWidth, height: minHeight }
return {
width: CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
height: CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
}
}
let maxRight = 0
@@ -276,25 +315,28 @@ export function useNodeUtilities(blocks: Record<string, any>) {
childNodes.forEach((node) => {
const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id)
// Child positions are relative to content area's inner top-left (inside padding)
// Calculate the rightmost and bottommost edges of children
const rightEdge = node.position.x + nodeWidth
const bottomEdge = node.position.y + nodeHeight
maxRight = Math.max(maxRight, rightEdge)
maxBottom = Math.max(maxBottom, bottomEdge)
// Use block position from store if available (more up-to-date)
const block = blocks[node.id]
const position = block?.position || node.position
maxRight = Math.max(maxRight, position.x + nodeWidth)
maxBottom = Math.max(maxBottom, position.y + nodeHeight)
})
// Container dimensions = header + padding + children bounds + padding
// Width: left padding + max child right edge + right padding (64px)
const width = Math.max(minWidth, leftPadding + maxRight + rightPadding)
// Height: header + top padding + max child bottom edge + bottom padding (8px)
const height = Math.max(minHeight, headerHeight + topPadding + maxBottom + bottomPadding)
const width = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING
)
const height = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
CONTAINER_DIMENSIONS.HEADER_HEIGHT +
CONTAINER_DIMENSIONS.TOP_PADDING +
maxBottom +
CONTAINER_DIMENSIONS.BOTTOM_PADDING
)
return { width, height }
},
[getNodes, getBlockDimensions]
[getNodes, getBlockDimensions, blocks]
)
/**
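To make the clamping concrete, here is a worked example of `clampPositionToContainer`. The container constants are illustrative only, chosen to match the literals this refactor replaced (header 50px, left/top/bottom padding 16px, right padding 80px); the real values come from `CONTAINER_DIMENSIONS` in `block-dimensions`:

```ts
// Assuming LEFT_PADDING=16, HEADER_HEIGHT=50, TOP_PADDING=16, RIGHT_PADDING=80, BOTTOM_PADDING=16.
const container = { width: 500, height: 300 }
const block = { width: 250, height: 100 }

// Bounds: minX = 16, minY = 50 + 16 = 66, maxX = 500 - 80 - 250 = 170, maxY = 300 - 16 - 100 = 184
clampPositionToContainer({ x: -40, y: 400 }, container, block)
// => { x: 16, y: 184 } — the block is pulled back inside the content area.
```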

View File

@@ -655,6 +655,7 @@ export function useWorkflowExecution() {
setExecutor,
setPendingBlocks,
setActiveBlocks,
workflows,
]
)

View File

@@ -18,6 +18,7 @@ import { useShallow } from 'zustand/react/shallow'
import type { OAuthConnectEventDetail } from '@/lib/copilot/tools/client/other/oauth-request-access'
import { createLogger } from '@/lib/logs/console/logger'
import type { OAuthProvider } from '@/lib/oauth'
import { BLOCK_DIMENSIONS, CONTAINER_DIMENSIONS } from '@/lib/workflows/blocks/block-dimensions'
import { TriggerUtils } from '@/lib/workflows/triggers/triggers'
import { useWorkspacePermissionsContext } from '@/app/workspace/[workspaceId]/providers/workspace-permissions-provider'
import {
@@ -39,6 +40,10 @@ import {
useCurrentWorkflow,
useNodeUtilities,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks'
import {
clampPositionToContainer,
estimateBlockDimensions,
} from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-node-utilities'
import { useSocket } from '@/app/workspace/providers/socket-provider'
import { getBlock } from '@/blocks'
import { isAnnotationOnlyBlock } from '@/executor/constants'
@@ -176,6 +181,7 @@ const WorkflowContent = React.memo(() => {
resizeLoopNodes,
updateNodeParent: updateNodeParentUtil,
getNodeAnchorPosition,
getBlockDimensions,
} = useNodeUtilities(blocks)
/** Triggers immediate subflow resize without delays. */
@@ -692,17 +698,19 @@ const WorkflowContent = React.memo(() => {
return
}
// Calculate position relative to the container's content area
// Account for header (50px), left padding (16px), and top padding (16px)
const headerHeight = 50
const leftPadding = 16
const topPadding = 16
const relativePosition = {
x: position.x - containerInfo.loopPosition.x - leftPadding,
y: position.y - containerInfo.loopPosition.y - headerHeight - topPadding,
// Calculate raw position relative to container origin
const rawPosition = {
x: position.x - containerInfo.loopPosition.x,
y: position.y - containerInfo.loopPosition.y,
}
// Clamp position to keep block inside container's content area
const relativePosition = clampPositionToContainer(
rawPosition,
containerInfo.dimensions,
estimateBlockDimensions(data.type)
)
// Capture existing child blocks before adding the new one
const existingChildBlocks = Object.values(blocks).filter(
(b) => b.data?.parentId === containerInfo.loopId
@@ -1501,6 +1509,66 @@ const WorkflowContent = React.memo(() => {
// Only sync non-position changes (like selection) to store if needed
}, [])
/**
* Updates container dimensions in displayNodes during drag.
* This allows live resizing of containers as their children are dragged.
*/
const updateContainerDimensionsDuringDrag = useCallback(
(draggedNodeId: string, draggedNodePosition: { x: number; y: number }) => {
const parentId = blocks[draggedNodeId]?.data?.parentId
if (!parentId) return
setDisplayNodes((currentNodes) => {
const childNodes = currentNodes.filter((n) => n.parentId === parentId)
if (childNodes.length === 0) return currentNodes
let maxRight = 0
let maxBottom = 0
childNodes.forEach((node) => {
const nodePosition = node.id === draggedNodeId ? draggedNodePosition : node.position
const { width: nodeWidth, height: nodeHeight } = getBlockDimensions(node.id)
maxRight = Math.max(maxRight, nodePosition.x + nodeWidth)
maxBottom = Math.max(maxBottom, nodePosition.y + nodeHeight)
})
const newWidth = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
CONTAINER_DIMENSIONS.LEFT_PADDING + maxRight + CONTAINER_DIMENSIONS.RIGHT_PADDING
)
const newHeight = Math.max(
CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
CONTAINER_DIMENSIONS.HEADER_HEIGHT +
CONTAINER_DIMENSIONS.TOP_PADDING +
maxBottom +
CONTAINER_DIMENSIONS.BOTTOM_PADDING
)
return currentNodes.map((node) => {
if (node.id === parentId) {
const currentWidth = node.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH
const currentHeight = node.data?.height || CONTAINER_DIMENSIONS.DEFAULT_HEIGHT
// Only update if dimensions changed
if (newWidth !== currentWidth || newHeight !== currentHeight) {
return {
...node,
data: {
...node.data,
width: newWidth,
height: newHeight,
},
}
}
}
return node
})
})
},
[blocks, getBlockDimensions]
)
/**
* Effect to resize loops when nodes change (add/remove/position change).
* Runs on structural changes only - not during drag (position-only changes).
@@ -1580,11 +1648,6 @@ const WorkflowContent = React.memo(() => {
const onConnect = useCallback(
(connection: any) => {
if (connection.source && connection.target) {
// Prevent self-connections
if (connection.source === connection.target) {
return
}
// Check if connecting nodes across container boundaries
const sourceNode = getNodes().find((n) => n.id === connection.source)
const targetNode = getNodes().find((n) => n.id === connection.target)
@@ -1681,6 +1744,11 @@ const WorkflowContent = React.memo(() => {
// Get the current parent ID of the node being dragged
const currentParentId = blocks[node.id]?.data?.parentId || null
// If the node is inside a container, update container dimensions during drag
if (currentParentId) {
updateContainerDimensionsDuringDrag(node.id, node.position)
}
// Check if this is a starter block - starter blocks should never be in containers
const isStarterBlock = node.data?.type === 'starter'
if (isStarterBlock) {
@@ -1812,7 +1880,14 @@ const WorkflowContent = React.memo(() => {
}
}
},
[getNodes, potentialParentId, blocks, getNodeAbsolutePosition, getNodeDepth]
[
getNodes,
potentialParentId,
blocks,
getNodeAbsolutePosition,
getNodeDepth,
updateContainerDimensionsDuringDrag,
]
)
/** Captures initial parent ID and position when drag starts. */
@@ -1841,17 +1916,47 @@ const WorkflowContent = React.memo(() => {
})
document.body.style.cursor = ''
// Get the block's current parent (if any)
const currentBlock = blocks[node.id]
const currentParentId = currentBlock?.data?.parentId
// Calculate position - clamp if inside a container
let finalPosition = node.position
if (currentParentId) {
// Block is inside a container - clamp position to keep it fully inside
const parentNode = getNodes().find((n) => n.id === currentParentId)
if (parentNode) {
const containerDimensions = {
width: parentNode.data?.width || CONTAINER_DIMENSIONS.DEFAULT_WIDTH,
height: parentNode.data?.height || CONTAINER_DIMENSIONS.DEFAULT_HEIGHT,
}
const blockDimensions = {
width: BLOCK_DIMENSIONS.FIXED_WIDTH,
height: Math.max(
currentBlock?.height || BLOCK_DIMENSIONS.MIN_HEIGHT,
BLOCK_DIMENSIONS.MIN_HEIGHT
),
}
finalPosition = clampPositionToContainer(
node.position,
containerDimensions,
blockDimensions
)
}
}
// Emit collaborative position update for the final position
// This ensures other users see the smooth final position
collaborativeUpdateBlockPosition(node.id, node.position, true)
collaborativeUpdateBlockPosition(node.id, finalPosition, true)
// Record single move entry on drag end to avoid micro-moves
const start = getDragStartPosition()
if (start && start.id === node.id) {
const before = { x: start.x, y: start.y, parentId: start.parentId }
const after = {
x: node.position.x,
y: node.position.y,
x: finalPosition.x,
y: finalPosition.y,
parentId: node.parentId || blocks[node.id]?.data?.parentId,
}
const moved =

View File

@@ -423,7 +423,21 @@ export function SearchModal({
}
break
case 'workspace':
if (item.isCurrent) {
break
}
if (item.href) {
router.push(item.href)
}
break
case 'workflow':
if (!item.isCurrent && item.href) {
router.push(item.href)
window.dispatchEvent(
new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } })
)
}
break
case 'page':
case 'doc':
if (item.href) {
@@ -431,12 +445,6 @@ export function SearchModal({
window.open(item.href, '_blank', 'noopener,noreferrer')
} else {
router.push(item.href)
// Scroll to the workflow in the sidebar after navigation
if (item.type === 'workflow') {
window.dispatchEvent(
new CustomEvent(SIDEBAR_SCROLL_EVENT, { detail: { itemId: item.id } })
)
}
}
}
break

View File

@@ -1,8 +1,5 @@
import { Button } from '@/components/emcn'
/**
* Formats transport type for display (e.g., "streamable-http" -> "Streamable-HTTP").
*/
export function formatTransportLabel(transport: string): string {
return transport
.split('-')
@@ -14,10 +11,10 @@ export function formatTransportLabel(transport: string): string {
.join('-')
}
/**
* Formats tools count and names for display.
*/
function formatToolsLabel(tools: any[]): string {
function formatToolsLabel(tools: any[], connectionStatus?: string): string {
if (connectionStatus === 'error') {
return 'Unable to connect'
}
const count = tools.length
const plural = count !== 1 ? 's' : ''
const names = count > 0 ? `: ${tools.map((t) => t.name).join(', ')}` : ''
@@ -29,35 +26,41 @@ interface ServerListItemProps {
tools: any[]
isDeleting: boolean
isLoadingTools?: boolean
isRefreshing?: boolean
onRemove: () => void
onViewDetails: () => void
}
/**
* Renders a single MCP server list item with details and delete actions.
*/
export function ServerListItem({
server,
tools,
isDeleting,
isLoadingTools = false,
isRefreshing = false,
onRemove,
onViewDetails,
}: ServerListItemProps) {
const transportLabel = formatTransportLabel(server.transport || 'http')
const toolsLabel = formatToolsLabel(tools)
const toolsLabel = formatToolsLabel(tools, server.connectionStatus)
const isError = server.connectionStatus === 'error'
return (
<div className='flex items-center justify-between gap-[12px]'>
<div className='flex min-w-0 flex-col justify-center gap-[1px]'>
<div className='flex items-center gap-[6px]'>
<span className='max-w-[280px] truncate font-medium text-[14px]'>
<span className='max-w-[200px] truncate font-medium text-[14px]'>
{server.name || 'Unnamed Server'}
</span>
<span className='text-[13px] text-[var(--text-secondary)]'>({transportLabel})</span>
</div>
<p className='truncate text-[13px] text-[var(--text-muted)]'>
{isLoadingTools && tools.length === 0 ? 'Loading...' : toolsLabel}
<p
className={`truncate text-[13px] ${isError ? 'text-red-500 dark:text-red-400' : 'text-[var(--text-muted)]'}`}
>
{isRefreshing
? 'Refreshing...'
: isLoadingTools && tools.length === 0
? 'Loading...'
: toolsLabel}
</p>
</div>
<div className='flex flex-shrink-0 items-center gap-[4px]'>
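`formatToolsLabel` now receives the server's `connectionStatus` and short-circuits when the last connection attempt failed, so a stale tool list is never rendered next to an error. The hunk cuts off before the normal-path return, so the second result below is only an assumption about the label's shape; the first follows directly from the code above:

```ts
formatToolsLabel([{ name: 'search' }, { name: 'fetch' }], 'error')
// => 'Unable to connect' (the tool list is ignored when the server errored)

formatToolsLabel([{ name: 'search' }, { name: 'fetch' }])
// => presumably along the lines of '2 tools: search, fetch'
```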

View File

@@ -1,9 +1,10 @@
'use client'
import { useCallback, useMemo, useRef, useState } from 'react'
import { useCallback, useEffect, useMemo, useRef, useState } from 'react'
import { Plus, Search } from 'lucide-react'
import { useParams } from 'next/navigation'
import {
Badge,
Button,
Input as EmcnInput,
Modal,
@@ -14,6 +15,7 @@ import {
} from '@/components/emcn'
import { Input } from '@/components/ui'
import { createLogger } from '@/lib/logs/console/logger'
import { getIssueBadgeLabel, getMcpToolIssue, type McpToolIssue } from '@/lib/mcp/tool-validation'
import { checkEnvVarTrigger } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/panel/components/editor/components/sub-block/components/env-var-dropdown'
import {
useCreateMcpServer,
@@ -21,6 +23,7 @@ import {
useMcpServers,
useMcpToolsQuery,
useRefreshMcpServer,
useStoredMcpTools,
} from '@/hooks/queries/mcp'
import { useMcpServerTest } from '@/hooks/use-mcp-server-test'
import type { InputFieldType, McpServerFormData, McpServerTestResult } from './components'
@@ -44,6 +47,9 @@ interface McpServer {
name?: string
transport?: string
url?: string
connectionStatus?: 'connected' | 'disconnected' | 'error'
lastError?: string | null
lastConnected?: string
}
const logger = createLogger('McpSettings')
@@ -69,11 +75,15 @@ function getTestButtonLabel(
return 'Test Connection'
}
interface MCPProps {
initialServerId?: string | null
}
/**
* MCP Settings component for managing Model Context Protocol servers.
* Handles server CRUD operations, connection testing, and environment variable integration.
*/
export function MCP() {
export function MCP({ initialServerId }: MCPProps) {
const params = useParams()
const workspaceId = params.workspaceId as string
@@ -88,6 +98,7 @@ export function MCP() {
isLoading: toolsLoading,
isFetching: toolsFetching,
} = useMcpToolsQuery(workspaceId)
const { data: storedTools = [] } = useStoredMcpTools(workspaceId)
const createServerMutation = useCreateMcpServer()
const deleteServerMutation = useDeleteMcpServer()
const refreshServerMutation = useRefreshMcpServer()
@@ -106,7 +117,9 @@ export function MCP() {
const [serverToDelete, setServerToDelete] = useState<{ id: string; name: string } | null>(null)
const [selectedServerId, setSelectedServerId] = useState<string | null>(null)
const [refreshStatus, setRefreshStatus] = useState<'idle' | 'refreshing' | 'refreshed'>('idle')
const [refreshingServers, setRefreshingServers] = useState<
Record<string, 'refreshing' | 'refreshed'>
>({})
const [showEnvVars, setShowEnvVars] = useState(false)
const [envSearchTerm, setEnvSearchTerm] = useState('')
@@ -114,10 +127,16 @@ export function MCP() {
const [activeInputField, setActiveInputField] = useState<InputFieldType | null>(null)
const [activeHeaderIndex, setActiveHeaderIndex] = useState<number | null>(null)
// Scroll position state for formatted text overlays
const [urlScrollLeft, setUrlScrollLeft] = useState(0)
const [headerScrollLeft, setHeaderScrollLeft] = useState<Record<string, number>>({})
// Auto-select server when initialServerId is provided
useEffect(() => {
if (initialServerId && servers.some((s) => s.id === initialServerId)) {
setSelectedServerId(initialServerId)
}
}, [initialServerId, servers])
/**
* Resets environment variable dropdown state.
*/
@@ -237,6 +256,7 @@ export function MCP() {
/**
* Adds a new MCP server after validating and testing the connection.
* Only creates the server if the connection test succeeds.
*/
const handleAddServer = useCallback(async () => {
if (!formData.name.trim()) return
@@ -253,12 +273,12 @@ export function MCP() {
workspaceId,
}
if (!testResult) {
const result = await testConnection(serverConfig)
if (!result.success) return
}
const connectionResult = await testConnection(serverConfig)
if (testResult && !testResult.success) return
if (!connectionResult.success) {
logger.error('Connection test failed, server not added:', connectionResult.error)
return
}
await createServerMutation.mutateAsync({
workspaceId,
@@ -279,15 +299,7 @@ export function MCP() {
} finally {
setIsAddingServer(false)
}
}, [
formData,
testResult,
testConnection,
createServerMutation,
workspaceId,
headersToRecord,
resetForm,
])
}, [formData, testConnection, createServerMutation, workspaceId, headersToRecord, resetForm])
/**
* Opens the delete confirmation dialog for an MCP server.
@@ -297,9 +309,6 @@ export function MCP() {
setShowDeleteDialog(true)
}, [])
/**
* Confirms and executes the server deletion.
*/
const confirmDeleteServer = useCallback(async () => {
if (!serverToDelete) return
@@ -399,14 +408,24 @@ export function MCP() {
const handleRefreshServer = useCallback(
async (serverId: string) => {
try {
setRefreshStatus('refreshing')
setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshing' }))
await refreshServerMutation.mutateAsync({ workspaceId, serverId })
logger.info(`Refreshed MCP server: ${serverId}`)
setRefreshStatus('refreshed')
setTimeout(() => setRefreshStatus('idle'), 2000)
setRefreshingServers((prev) => ({ ...prev, [serverId]: 'refreshed' }))
setTimeout(() => {
setRefreshingServers((prev) => {
const newState = { ...prev }
delete newState[serverId]
return newState
})
}, 2000)
} catch (error) {
logger.error('Failed to refresh MCP server:', error)
setRefreshStatus('idle')
setRefreshingServers((prev) => {
const newState = { ...prev }
delete newState[serverId]
return newState
})
}
},
[refreshServerMutation, workspaceId]
@@ -432,6 +451,53 @@ export function MCP() {
const isSubmitDisabled = serversLoading || isAddingServer || !isFormValid
const testButtonLabel = getTestButtonLabel(testResult, isTestingConnection)
/**
* Gets issues for stored tools that reference a specific server tool.
* Returns issues from all workflows that have stored this tool.
*/
const getStoredToolIssues = useCallback(
(serverId: string, toolName: string): { issue: McpToolIssue; workflowName: string }[] => {
const relevantStoredTools = storedTools.filter(
(st) => st.serverId === serverId && st.toolName === toolName
)
const serverStates = servers.map((s) => ({
id: s.id,
url: s.url,
connectionStatus: s.connectionStatus,
lastError: s.lastError || undefined,
}))
const discoveredTools = mcpToolsData.map((t) => ({
serverId: t.serverId,
name: t.name,
inputSchema: t.inputSchema,
}))
const issues: { issue: McpToolIssue; workflowName: string }[] = []
for (const storedTool of relevantStoredTools) {
const issue = getMcpToolIssue(
{
serverId: storedTool.serverId,
serverUrl: storedTool.serverUrl,
toolName: storedTool.toolName,
schema: storedTool.schema,
},
serverStates,
discoveredTools
)
if (issue) {
issues.push({ issue, workflowName: storedTool.workflowName })
}
}
return issues
},
[storedTools, servers, mcpToolsData]
)
if (selectedServer) {
const { server, tools } = selectedServer
const transportLabel = formatTransportLabel(server.transport || 'http')
@@ -463,6 +529,15 @@ export function MCP() {
</div>
)}
{server.connectionStatus === 'error' && (
<div className='flex flex-col gap-[8px]'>
<span className='font-medium text-[13px] text-[var(--text-primary)]'>Status</span>
<p className='text-[14px] text-red-500 dark:text-red-400'>
{server.lastError || 'Unable to connect'}
</p>
</div>
)}
<div className='flex flex-col gap-[8px]'>
<span className='font-medium text-[13px] text-[var(--text-primary)]'>
Tools ({tools.length})
@@ -471,21 +546,37 @@ export function MCP() {
<p className='text-[13px] text-[var(--text-muted)]'>No tools available</p>
) : (
<div className='flex flex-col gap-[8px]'>
{tools.map((tool) => (
<div
key={tool.name}
className='rounded-[6px] border bg-[var(--surface-3)] px-[10px] py-[8px]'
>
<p className='font-medium text-[13px] text-[var(--text-primary)]'>
{tool.name}
</p>
{tool.description && (
<p className='mt-[4px] text-[13px] text-[var(--text-tertiary)]'>
{tool.description}
</p>
)}
</div>
))}
{tools.map((tool) => {
const issues = getStoredToolIssues(server.id, tool.name)
return (
<div
key={tool.name}
className='rounded-[6px] border bg-[var(--surface-3)] px-[10px] py-[8px]'
>
<div className='flex items-center justify-between'>
<p className='font-medium text-[13px] text-[var(--text-primary)]'>
{tool.name}
</p>
{issues.length > 0 && (
<Badge
variant='outline'
style={{
borderColor: 'var(--warning)',
color: 'var(--warning)',
}}
>
{getIssueBadgeLabel(issues[0].issue)}
</Badge>
)}
</div>
{tool.description && (
<p className='mt-[4px] text-[13px] text-[var(--text-tertiary)]'>
{tool.description}
</p>
)}
</div>
)
})}
</div>
)}
</div>
@@ -496,11 +587,11 @@ export function MCP() {
<Button
onClick={() => handleRefreshServer(server.id)}
variant='default'
disabled={refreshStatus !== 'idle'}
disabled={!!refreshingServers[server.id]}
>
{refreshStatus === 'refreshing'
{refreshingServers[server.id] === 'refreshing'
? 'Refreshing...'
: refreshStatus === 'refreshed'
: refreshingServers[server.id] === 'refreshed'
? 'Refreshed'
: 'Refresh Tools'}
</Button>
@@ -672,6 +763,7 @@ export function MCP() {
tools={tools}
isDeleting={deletingServers.has(server.id)}
isLoadingTools={isLoadingTools}
isRefreshing={refreshingServers[server.id] === 'refreshing'}
onRemove={() => handleRemoveServer(server.id, server.name || 'this server')}
onViewDetails={() => handleViewDetails(server.id)}
/>

View File

@@ -46,6 +46,7 @@ import { generalSettingsKeys, useGeneralSettings } from '@/hooks/queries/general
import { organizationKeys, useOrganizations } from '@/hooks/queries/organization'
import { ssoKeys, useSSOProviders } from '@/hooks/queries/sso'
import { subscriptionKeys, useSubscriptionData } from '@/hooks/queries/subscription'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
const isBillingEnabled = isTruthy(getEnv('NEXT_PUBLIC_BILLING_ENABLED'))
const isSSOEnabled = isTruthy(getEnv('NEXT_PUBLIC_SSO_ENABLED'))
@@ -134,6 +135,8 @@ const allNavigationItems: NavigationItem[] = [
export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
const [activeSection, setActiveSection] = useState<SettingsSection>('general')
const { initialSection, mcpServerId, clearInitialState } = useSettingsModalStore()
const [pendingMcpServerId, setPendingMcpServerId] = useState<string | null>(null)
const { data: session } = useSession()
const queryClient = useQueryClient()
const { data: organizationsData } = useOrganizations()
@@ -247,6 +250,24 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
// React Query hook automatically loads and syncs settings
useGeneralSettings()
// Apply initial section from store when modal opens
useEffect(() => {
if (open && initialSection) {
setActiveSection(initialSection)
if (mcpServerId) {
setPendingMcpServerId(mcpServerId)
}
clearInitialState()
}
}, [open, initialSection, mcpServerId, clearInitialState])
// Clear pending server ID when section changes away from MCP
useEffect(() => {
if (activeSection !== 'mcp') {
setPendingMcpServerId(null)
}
}, [activeSection])
useEffect(() => {
const handleOpenSettings = (event: CustomEvent<{ tab: SettingsSection }>) => {
setActiveSection(event.detail.tab)
@@ -436,7 +457,7 @@ export function SettingsModal({ open, onOpenChange }: SettingsModalProps) {
{isBillingEnabled && activeSection === 'team' && <TeamManagement />}
{activeSection === 'sso' && <SSO />}
{activeSection === 'copilot' && <Copilot />}
{activeSection === 'mcp' && <MCP />}
{activeSection === 'mcp' && <MCP initialServerId={pendingMcpServerId} />}
{activeSection === 'custom-tools' && <CustomTools />}
</SModalMainBody>
</SModalMain>

View File

@@ -32,6 +32,7 @@ import {
} from '@/app/workspace/[workspaceId]/w/hooks'
import { useFolderStore } from '@/stores/folders/store'
import { useSearchModalStore } from '@/stores/search-modal/store'
import { useSettingsModalStore } from '@/stores/settings-modal/store'
import { MIN_SIDEBAR_WIDTH, useSidebarStore } from '@/stores/sidebar/store'
const logger = createLogger('Sidebar')
@@ -88,7 +89,11 @@ export function Sidebar() {
const [isWorkspaceMenuOpen, setIsWorkspaceMenuOpen] = useState(false)
const [isHelpModalOpen, setIsHelpModalOpen] = useState(false)
const [isSettingsModalOpen, setIsSettingsModalOpen] = useState(false)
const {
isOpen: isSettingsModalOpen,
openModal: openSettingsModal,
closeModal: closeSettingsModal,
} = useSettingsModalStore()
/** Listens for external events to open help modal */
useEffect(() => {
@@ -219,7 +224,7 @@ export function Sidebar() {
id: 'settings',
label: 'Settings',
icon: Settings,
onClick: () => setIsSettingsModalOpen(true),
onClick: () => openSettingsModal(),
},
],
[workspaceId]
@@ -654,7 +659,10 @@ export function Sidebar() {
{/* Footer Navigation Modals */}
<HelpModal open={isHelpModalOpen} onOpenChange={setIsHelpModalOpen} />
<SettingsModal open={isSettingsModalOpen} onOpenChange={setIsSettingsModalOpen} />
<SettingsModal
open={isSettingsModalOpen}
onOpenChange={(open) => (open ? openSettingsModal() : closeSettingsModal())}
/>
{/* Hidden file input for workspace import */}
<input
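Several of the changes above route through the new `useSettingsModalStore`: the sidebar opens the modal through it, the MCP tool badge deep-links to the MCP section with a server id, and the settings modal consumes `initialSection`/`mcpServerId` and then clears them. The store itself is not part of this compare; a minimal zustand sketch consistent with those call sites is shown below, as an assumed shape rather than the actual source:

```ts
// Assumed store shape — reconstructed from the call sites in this compare.
import { create } from 'zustand'

type SettingsSection = 'general' | 'mcp' | 'custom-tools' | string

interface SettingsModalState {
  isOpen: boolean
  initialSection: SettingsSection | null
  mcpServerId: string | null
  openModal: (opts?: { section?: SettingsSection; mcpServerId?: string }) => void
  closeModal: () => void
  clearInitialState: () => void
}

export const useSettingsModalStore = create<SettingsModalState>((set) => ({
  isOpen: false,
  initialSection: null,
  mcpServerId: null,
  openModal: (opts) =>
    set({
      isOpen: true,
      initialSection: opts?.section ?? null,
      mcpServerId: opts?.mcpServerId ?? null,
    }),
  closeModal: () => set({ isOpen: false }),
  clearInitialState: () => set({ initialSection: null, mcpServerId: null }),
}))
```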

View File

@@ -8,6 +8,8 @@ import {
getHostedModels,
getMaxTemperature,
getProviderIcon,
getReasoningEffortValuesForModel,
getVerbosityValuesForModel,
MODELS_WITH_REASONING_EFFORT,
MODELS_WITH_VERBOSITY,
providers,
@@ -114,12 +116,47 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
type: 'dropdown',
placeholder: 'Select reasoning effort...',
options: [
{ label: 'none', id: 'none' },
{ label: 'minimal', id: 'minimal' },
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
dependsOn: ['model'],
fetchOptions: async (blockId: string) => {
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
const blockValues = workflowValues?.[blockId]
const modelValue = blockValues?.model as string
if (!modelValue) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const validOptions = getReasoningEffortValuesForModel(modelValue)
if (!validOptions) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
return validOptions.map((opt) => ({ label: opt, id: opt }))
},
value: () => 'medium',
condition: {
field: 'model',
@@ -136,6 +173,43 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
],
dependsOn: ['model'],
fetchOptions: async (blockId: string) => {
const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')
const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
if (!activeWorkflowId) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const workflowValues = useSubBlockStore.getState().workflowValues[activeWorkflowId]
const blockValues = workflowValues?.[blockId]
const modelValue = blockValues?.model as string
if (!modelValue) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
const validOptions = getVerbosityValuesForModel(modelValue)
if (!validOptions) {
return [
{ label: 'low', id: 'low' },
{ label: 'medium', id: 'medium' },
{ label: 'high', id: 'high' },
]
}
return validOptions.map((opt) => ({ label: opt, id: opt }))
},
value: () => 'medium',
condition: {
field: 'model',
@@ -166,6 +240,28 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'tools',
title: 'Tools',
@@ -465,6 +561,8 @@ Example 3 (Array Input):
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
responseFormat: {
type: 'json',
description: 'JSON response format schema',
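The two `fetchOptions` callbacks added above share the same model-lookup boilerplate. A hedged sketch of how that lookup could be factored into a single helper; the helper name and fallback constant are illustrative, everything else mirrors the diff:

```typescript
// Hypothetical shared helper for model-dependent dropdown options.
// FALLBACK matches the low/medium/high list the diff falls back to.
const FALLBACK = ['low', 'medium', 'high'].map((id) => ({ label: id, id }))

async function fetchModelOptions(
  blockId: string,
  getValuesForModel: (model: string) => string[] | null | undefined
): Promise<Array<{ label: string; id: string }>> {
  const { useSubBlockStore } = await import('@/stores/workflows/subblock/store')
  const { useWorkflowRegistry } = await import('@/stores/workflows/registry/store')

  const activeWorkflowId = useWorkflowRegistry.getState().activeWorkflowId
  if (!activeWorkflowId) return FALLBACK

  const model = useSubBlockStore.getState().workflowValues[activeWorkflowId]?.[blockId]
    ?.model as string | undefined
  if (!model) return FALLBACK

  const valid = getValuesForModel(model)
  return valid ? valid.map((opt) => ({ label: opt, id: opt })) : FALLBACK
}

// Usage sketch:
//   fetchOptions: (blockId) => fetchModelOptions(blockId, getReasoningEffortValuesForModel)
//   fetchOptions: (blockId) => fetchModelOptions(blockId, getVerbosityValuesForModel)
```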

View File

@@ -4,14 +4,13 @@ import type { BlockConfig } from '@/blocks/types'
interface ConditionBlockOutput {
success: boolean
output: {
content: string
conditionResult: boolean
selectedPath: {
blockId: string
blockType: string
blockTitle: string
}
selectedConditionId: string
selectedOption: string
}
}
@@ -40,9 +39,8 @@ export const ConditionBlock: BlockConfig<ConditionBlockOutput> = {
},
inputs: {},
outputs: {
content: { type: 'string', description: 'Condition evaluation content' },
conditionResult: { type: 'boolean', description: 'Condition result' },
selectedPath: { type: 'json', description: 'Selected execution path' },
selectedConditionId: { type: 'string', description: 'Selected condition identifier' },
selectedOption: { type: 'string', description: 'Selected condition option ID' },
},
}

View File

@@ -239,6 +239,28 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'temperature',
title: 'Temperature',
@@ -356,6 +378,14 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
apiKey: { type: 'string' as ParamType, description: 'Provider API key' },
azureEndpoint: { type: 'string' as ParamType, description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string' as ParamType, description: 'Azure API version' },
vertexProject: {
type: 'string' as ParamType,
description: 'Google Cloud project ID for Vertex AI',
},
vertexLocation: {
type: 'string' as ParamType,
description: 'Google Cloud location for Vertex AI',
},
temperature: {
type: 'number' as ParamType,
description: 'Response randomness level (low for consistent evaluation)',

View File

@@ -188,6 +188,28 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'temperature',
title: 'Temperature',
@@ -235,6 +257,8 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
temperature: {
type: 'number',
description: 'Response randomness level (low for consistent routing)',

View File

@@ -0,0 +1,230 @@
import { ServiceNowIcon } from '@/components/icons'
import type { BlockConfig } from '@/blocks/types'
import type { ServiceNowResponse } from '@/tools/servicenow/types'
export const ServiceNowBlock: BlockConfig<ServiceNowResponse> = {
type: 'servicenow',
name: 'ServiceNow',
description: 'Create, read, update, and delete ServiceNow records',
longDescription:
'Integrate ServiceNow into your workflow. Create, read, update, and delete records in any ServiceNow table including incidents, tasks, change requests, users, and more.',
docsLink: 'https://docs.sim.ai/tools/servicenow',
category: 'tools',
bgColor: '#032D42',
icon: ServiceNowIcon,
subBlocks: [
// Operation selector
{
id: 'operation',
title: 'Operation',
type: 'dropdown',
options: [
{ label: 'Create Record', id: 'servicenow_create_record' },
{ label: 'Read Records', id: 'servicenow_read_record' },
{ label: 'Update Record', id: 'servicenow_update_record' },
{ label: 'Delete Record', id: 'servicenow_delete_record' },
],
value: () => 'servicenow_read_record',
},
// Instance URL
{
id: 'instanceUrl',
title: 'Instance URL',
type: 'short-input',
placeholder: 'https://instance.service-now.com',
required: true,
description: 'Your ServiceNow instance URL (e.g., https://yourcompany.service-now.com)',
},
// Username
{
id: 'username',
title: 'Username',
type: 'short-input',
placeholder: 'Enter your ServiceNow username',
required: true,
description: 'ServiceNow user with web service access',
},
// Password
{
id: 'password',
title: 'Password',
type: 'short-input',
placeholder: 'Enter your ServiceNow password',
password: true,
required: true,
description: 'Password for the ServiceNow user',
},
// Table Name
{
id: 'tableName',
title: 'Table Name',
type: 'short-input',
placeholder: 'incident, task, sys_user, etc.',
required: true,
description: 'ServiceNow table name',
},
// Create-specific: Fields
{
id: 'fields',
title: 'Fields (JSON)',
type: 'code',
language: 'json',
placeholder: '{\n "short_description": "Issue description",\n "priority": "1"\n}',
condition: { field: 'operation', value: 'servicenow_create_record' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert ServiceNow developer. Generate ServiceNow record field objects as JSON based on the user's request.
### CONTEXT
ServiceNow records use specific field names depending on the table. Common tables and their key fields include:
- incident: short_description, description, priority (1-5), urgency (1-3), impact (1-3), caller_id, assignment_group, assigned_to, category, subcategory, state
- task: short_description, description, priority, assignment_group, assigned_to, state
- sys_user: user_name, first_name, last_name, email, active, department, title
- change_request: short_description, description, type, risk, impact, priority, assignment_group
### RULES
- Output ONLY valid JSON object starting with { and ending with }
- Use correct ServiceNow field names for the target table
- Values should be strings unless the field specifically requires another type
- For reference fields (like caller_id, assigned_to), use sys_id values or display values
- Do not include sys_id in create operations (it's auto-generated)
### EXAMPLE
User: "Create a high priority incident for network outage"
Output: {"short_description": "Network outage", "description": "Network connectivity issue affecting users", "priority": "1", "urgency": "1", "impact": "1", "category": "Network"}`,
generationType: 'json-object',
},
},
// Read-specific: Query options
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Specific record sys_id (optional)',
condition: { field: 'operation', value: 'servicenow_read_record' },
},
{
id: 'number',
title: 'Record Number',
type: 'short-input',
placeholder: 'e.g., INC0010001 (optional)',
condition: { field: 'operation', value: 'servicenow_read_record' },
},
{
id: 'query',
title: 'Query String',
type: 'short-input',
placeholder: 'active=true^priority=1',
condition: { field: 'operation', value: 'servicenow_read_record' },
description: 'ServiceNow encoded query string',
},
{
id: 'limit',
title: 'Limit',
type: 'short-input',
placeholder: '10',
condition: { field: 'operation', value: 'servicenow_read_record' },
},
{
id: 'fields',
title: 'Fields to Return',
type: 'short-input',
placeholder: 'number,short_description,priority',
condition: { field: 'operation', value: 'servicenow_read_record' },
description: 'Comma-separated list of fields',
},
// Update-specific: sysId and fields
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Record sys_id to update',
condition: { field: 'operation', value: 'servicenow_update_record' },
required: true,
},
{
id: 'fields',
title: 'Fields to Update (JSON)',
type: 'code',
language: 'json',
placeholder: '{\n "state": "2",\n "assigned_to": "user.sys_id"\n}',
condition: { field: 'operation', value: 'servicenow_update_record' },
required: true,
wandConfig: {
enabled: true,
maintainHistory: true,
prompt: `You are an expert ServiceNow developer. Generate ServiceNow record update field objects as JSON based on the user's request.
### CONTEXT
ServiceNow records use specific field names depending on the table. Common update scenarios include:
- incident: state (1=New, 2=In Progress, 3=On Hold, 6=Resolved, 7=Closed), assigned_to, work_notes, close_notes, close_code
- task: state, assigned_to, work_notes, percent_complete
- change_request: state, risk, approval, work_notes
### RULES
- Output ONLY valid JSON object starting with { and ending with }
- Include only the fields that need to be updated
- Use correct ServiceNow field names for the target table
- For state transitions, use the correct numeric state values
- work_notes and comments fields append to existing values
### EXAMPLE
User: "Assign the incident to John and set to in progress"
Output: {"state": "2", "assigned_to": "john.doe", "work_notes": "Assigned and starting investigation"}`,
generationType: 'json-object',
},
},
// Delete-specific: sysId
{
id: 'sysId',
title: 'Record sys_id',
type: 'short-input',
placeholder: 'Record sys_id to delete',
condition: { field: 'operation', value: 'servicenow_delete_record' },
required: true,
},
],
tools: {
access: [
'servicenow_create_record',
'servicenow_read_record',
'servicenow_update_record',
'servicenow_delete_record',
],
config: {
tool: (params) => params.operation,
params: (params) => {
const { operation, fields, ...rest } = params
const isCreateOrUpdate =
operation === 'servicenow_create_record' || operation === 'servicenow_update_record'
if (fields && isCreateOrUpdate) {
const parsedFields = typeof fields === 'string' ? JSON.parse(fields) : fields
return { ...rest, fields: parsedFields }
}
return rest
},
},
},
inputs: {
operation: { type: 'string', description: 'Operation to perform' },
instanceUrl: { type: 'string', description: 'ServiceNow instance URL' },
username: { type: 'string', description: 'ServiceNow username' },
password: { type: 'string', description: 'ServiceNow password' },
tableName: { type: 'string', description: 'Table name' },
sysId: { type: 'string', description: 'Record sys_id' },
number: { type: 'string', description: 'Record number' },
query: { type: 'string', description: 'Query string' },
limit: { type: 'number', description: 'Result limit' },
fields: { type: 'json', description: 'Fields object or JSON string' },
},
outputs: {
record: { type: 'json', description: 'Single ServiceNow record' },
records: { type: 'json', description: 'Array of ServiceNow records' },
success: { type: 'boolean', description: 'Operation success status' },
metadata: { type: 'json', description: 'Operation metadata' },
},
}
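The `params` transform in the ServiceNow block only touches `fields`, and only for create/update operations. A small before/after sketch with illustrative values:

```typescript
// Raw subblock values as collected by the block (fields is the JSON string from the editor).
const params = {
  operation: 'servicenow_create_record',
  instanceUrl: 'https://yourcompany.service-now.com',
  tableName: 'incident',
  username: 'integration.user',
  password: '***',
  fields: '{"short_description": "Network outage", "priority": "1"}',
}

// tools.config.tool(params) -> 'servicenow_create_record'
// tools.config.params(params) strips `operation` and parses `fields`:
const toolParams = {
  instanceUrl: 'https://yourcompany.service-now.com',
  tableName: 'incident',
  username: 'integration.user',
  password: '***',
  fields: { short_description: 'Network outage', priority: '1' },
}
```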

View File

@@ -99,6 +99,28 @@ export const TranslateBlock: BlockConfig = {
value: providers['azure-openai'].models,
},
},
{
id: 'vertexProject',
title: 'Vertex AI Project',
type: 'short-input',
placeholder: 'your-gcp-project-id',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'vertexLocation',
title: 'Vertex AI Location',
type: 'short-input',
placeholder: 'us-central1',
connectionDroppable: false,
condition: {
field: 'model',
value: providers.vertex.models,
},
},
{
id: 'systemPrompt',
title: 'System Prompt',
@@ -120,6 +142,8 @@ export const TranslateBlock: BlockConfig = {
apiKey: params.apiKey,
azureEndpoint: params.azureEndpoint,
azureApiVersion: params.azureApiVersion,
vertexProject: params.vertexProject,
vertexLocation: params.vertexLocation,
}),
},
},
@@ -129,6 +153,8 @@ export const TranslateBlock: BlockConfig = {
apiKey: { type: 'string', description: 'Provider API key' },
azureEndpoint: { type: 'string', description: 'Azure OpenAI endpoint URL' },
azureApiVersion: { type: 'string', description: 'Azure API version' },
vertexProject: { type: 'string', description: 'Google Cloud project ID for Vertex AI' },
vertexLocation: { type: 'string', description: 'Google Cloud location for Vertex AI' },
systemPrompt: { type: 'string', description: 'Translation instructions' },
},
outputs: {

View File

@@ -96,6 +96,7 @@ import { SearchBlock } from '@/blocks/blocks/search'
import { SendGridBlock } from '@/blocks/blocks/sendgrid'
import { SentryBlock } from '@/blocks/blocks/sentry'
import { SerperBlock } from '@/blocks/blocks/serper'
import { ServiceNowBlock } from '@/blocks/blocks/servicenow'
import { SftpBlock } from '@/blocks/blocks/sftp'
import { SharepointBlock } from '@/blocks/blocks/sharepoint'
import { ShopifyBlock } from '@/blocks/blocks/shopify'
@@ -238,6 +239,7 @@ export const registry: Record<string, BlockConfig> = {
search: SearchBlock,
sendgrid: SendGridBlock,
sentry: SentryBlock,
servicenow: ServiceNowBlock,
serper: SerperBlock,
sharepoint: SharepointBlock,
shopify: ShopifyBlock,

View File

@@ -291,7 +291,7 @@ function CodeRow({ index, style, ...props }: RowComponentProps<CodeRowProps>) {
const line = lines[index]
return (
<div style={style} className='flex' data-row-index={index}>
<div style={style} className={cn('flex', wrapText && 'overflow-hidden')} data-row-index={index}>
{showGutter && (
<div
className='flex-shrink-0 select-none pr-0.5 text-right text-[var(--text-muted)] text-xs tabular-nums leading-[21px] dark:text-[#a8a8a8]'
@@ -303,7 +303,7 @@ function CodeRow({ index, style, ...props }: RowComponentProps<CodeRowProps>) {
<pre
className={cn(
'm-0 flex-1 pr-2 pl-2 font-mono text-[13px] text-[var(--text-primary)] leading-[21px] dark:text-[#eeeeee]',
wrapText ? 'whitespace-pre-wrap break-words' : 'whitespace-pre'
wrapText ? 'min-w-0 whitespace-pre-wrap break-words' : 'whitespace-pre'
)}
dangerouslySetInnerHTML={{ __html: line.html || '&nbsp;' }}
/>
@@ -625,7 +625,7 @@ const VirtualizedViewerInner = memo(function VirtualizedViewerInner({
rowComponent={CodeRow}
rowProps={rowProps}
overscanCount={5}
className='overflow-x-auto'
className={wrapText ? 'overflow-x-hidden' : 'overflow-x-auto'}
/>
</div>
)

View File

@@ -2452,6 +2452,56 @@ export const GeminiIcon = (props: SVGProps<SVGSVGElement>) => (
</svg>
)
export const VertexIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
id='standard_product_icon'
xmlns='http://www.w3.org/2000/svg'
version='1.1'
viewBox='0 0 512 512'
>
<g id='bounding_box'>
<rect width='512' height='512' fill='none' />
</g>
<g id='art'>
<path
d='M128,244.99c-8.84,0-16-7.16-16-16v-95.97c0-8.84,7.16-16,16-16s16,7.16,16,16v95.97c0,8.84-7.16,16-16,16Z'
fill='#ea4335'
/>
<path
d='M256,458c-2.98,0-5.97-.83-8.59-2.5l-186-122c-7.46-4.74-9.65-14.63-4.91-22.09,4.75-7.46,14.64-9.65,22.09-4.91l177.41,116.53,177.41-116.53c7.45-4.74,17.34-2.55,22.09,4.91,4.74,7.46,2.55,17.34-4.91,22.09l-186,122c-2.62,1.67-5.61,2.5-8.59,2.5Z'
fill='#fbbc04'
/>
<path
d='M256,388.03c-8.84,0-16-7.16-16-16v-73.06c0-8.84,7.16-16,16-16s16,7.16,16,16v73.06c0,8.84-7.16,16-16,16Z'
fill='#34a853'
/>
<circle cx='128' cy='70' r='16' fill='#ea4335' />
<circle cx='128' cy='292' r='16' fill='#ea4335' />
<path
d='M384.23,308.01c-8.82,0-15.98-7.14-16-15.97l-.23-94.01c-.02-8.84,7.13-16.02,15.97-16.03h.04c8.82,0,15.98,7.14,16,15.97l.23,94.01c.02,8.84-7.13,16.02-15.97,16.03h-.04Z'
fill='#4285f4'
/>
<circle cx='384' cy='70' r='16' fill='#4285f4' />
<circle cx='384' cy='134' r='16' fill='#4285f4' />
<path
d='M320,220.36c-8.84,0-16-7.16-16-16v-103.02c0-8.84,7.16-16,16-16s16,7.16,16,16v103.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='256' cy='171' r='16' fill='#34a853' />
<circle cx='256' cy='235' r='16' fill='#34a853' />
<circle cx='320' cy='265' r='16' fill='#fbbc04' />
<circle cx='320' cy='329' r='16' fill='#fbbc04' />
<path
d='M192,217.36c-8.84,0-16-7.16-16-16v-100.02c0-8.84,7.16-16,16-16s16,7.16,16,16v100.02c0,8.84-7.16,16-16,16Z'
fill='#fbbc04'
/>
<circle cx='192' cy='265' r='16' fill='#fbbc04' />
<circle cx='192' cy='329' r='16' fill='#fbbc04' />
</g>
</svg>
)
export const CerebrasIcon = (props: SVGProps<SVGSVGElement>) => (
<svg
{...props}
@@ -3335,6 +3385,21 @@ export function SalesforceIcon(props: SVGProps<SVGSVGElement>) {
)
}
export function ServiceNowIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg {...props} xmlns='http://www.w3.org/2000/svg' viewBox='0 0 71.1 63.6'>
<path
fillRule='evenodd'
clipRule='evenodd'
fill='#62D84E'
d='M35.8,0C16.1,0,0,15.9,0,35.6c0,9.8,4,19.3,11.2,26c2.5,2.4,6.4,2.6,9.2,0.5c9-6.7,21.4-6.7,30.4,0
c2.8,2.1,6.7,1.9,9.2-0.5C74.3,48,74.9,25.4,61.3,11.1C54.7,4.1,45.4,0.1,35.8,0 M35.6,53.5C26,53.8,18,46.2,17.8,36.7
c0-0.3,0-0.6,0-0.9c0-9.8,8-17.8,17.8-17.8s17.8,8,17.8,17.8c0.3,9.6-7.3,17.5-16.8,17.8C36.2,53.5,35.9,53.5,35.6,53.5'
/>
</svg>
)
}
export function ApolloIcon(props: SVGProps<SVGSVGElement>) {
return (
<svg

View File

@@ -3,5 +3,5 @@
"name": "Emir Karabeg",
"url": "https://x.com/karabegemir",
"xHandle": "karabegemir",
"avatarUrl": "/studio/authors/emir.png"
"avatarUrl": "/studio/authors/emir.jpg"
}

View File

@@ -3,5 +3,5 @@
"name": "Siddharth",
"url": "https://x.com/sidganesan",
"xHandle": "sidganesan",
"avatarUrl": "/studio/authors/sid.png"
"avatarUrl": "/studio/authors/sid.jpg"
}

View File

@@ -3,5 +3,5 @@
"name": "Waleed Latif",
"url": "https://x.com/typingwala",
"xHandle": "typingwala",
"avatarUrl": "/studio/authors/waleed.png"
"avatarUrl": "/studio/authors/waleed.jpg"
}

View File

@@ -18,7 +18,7 @@ featured: true
draft: false
---
![Sim team photo](/studio/series-a/team.png)
![Sim team photo](/studio/series-a/team.jpg)
## Why we're excited


View File

@@ -1,3 +1,6 @@
import { db } from '@sim/db'
import { mcpServers } from '@sim/db/schema'
import { and, eq, inArray, isNull } from 'drizzle-orm'
import { getBaseUrl } from '@/lib/core/utils/urls'
import { createLogger } from '@/lib/logs/console/logger'
import {
@@ -72,6 +75,11 @@ export class BlockExecutor {
try {
resolvedInputs = this.resolver.resolveInputs(ctx, node.id, block.config.params, block)
if (block.metadata?.id === BlockType.AGENT && resolvedInputs.tools) {
resolvedInputs = await this.filterUnavailableMcpToolsForLog(ctx, resolvedInputs)
}
if (blockLog) {
blockLog.input = resolvedInputs
}
@@ -395,6 +403,60 @@ export class BlockExecutor {
return undefined
}
/**
* Filters out unavailable MCP tools from agent inputs for logging.
* Only includes tools from servers with 'connected' status.
*/
private async filterUnavailableMcpToolsForLog(
ctx: ExecutionContext,
inputs: Record<string, any>
): Promise<Record<string, any>> {
const tools = inputs.tools
if (!Array.isArray(tools) || tools.length === 0) return inputs
const mcpTools = tools.filter((t: any) => t.type === 'mcp')
if (mcpTools.length === 0) return inputs
const serverIds = [
...new Set(mcpTools.map((t: any) => t.params?.serverId).filter(Boolean)),
] as string[]
if (serverIds.length === 0) return inputs
const availableServerIds = new Set<string>()
if (ctx.workspaceId && serverIds.length > 0) {
try {
const servers = await db
.select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus })
.from(mcpServers)
.where(
and(
eq(mcpServers.workspaceId, ctx.workspaceId),
inArray(mcpServers.id, serverIds),
isNull(mcpServers.deletedAt)
)
)
for (const server of servers) {
if (server.connectionStatus === 'connected') {
availableServerIds.add(server.id)
}
}
} catch (error) {
logger.warn('Failed to check MCP server availability for logging:', error)
return inputs
}
}
const filteredTools = tools.filter((tool: any) => {
if (tool.type !== 'mcp') return true
const serverId = tool.params?.serverId
if (!serverId) return false
return availableServerIds.has(serverId)
})
return { ...inputs, tools: filteredTools }
}
private preparePauseResumeSelfReference(
ctx: ExecutionContext,
node: DAGNode,

View File

@@ -1,3 +1,6 @@
import { db } from '@sim/db'
import { mcpServers } from '@sim/db/schema'
import { and, eq, inArray, isNull } from 'drizzle-orm'
import { createLogger } from '@/lib/logs/console/logger'
import { createMcpToolId } from '@/lib/mcp/utils'
import { getAllBlocks } from '@/blocks'
@@ -35,19 +38,23 @@ export class AgentBlockHandler implements BlockHandler {
block: SerializedBlock,
inputs: AgentInputs
): Promise<BlockOutput | StreamingExecution> {
const responseFormat = this.parseResponseFormat(inputs.responseFormat)
const model = inputs.model || AGENT.DEFAULT_MODEL
// Filter out unavailable MCP tools early so they don't appear in logs/inputs
const filteredTools = await this.filterUnavailableMcpTools(ctx, inputs.tools || [])
const filteredInputs = { ...inputs, tools: filteredTools }
const responseFormat = this.parseResponseFormat(filteredInputs.responseFormat)
const model = filteredInputs.model || AGENT.DEFAULT_MODEL
const providerId = getProviderFromModel(model)
const formattedTools = await this.formatTools(ctx, inputs.tools || [])
const formattedTools = await this.formatTools(ctx, filteredInputs.tools || [])
const streamingConfig = this.getStreamingConfig(ctx, block)
const messages = await this.buildMessages(ctx, inputs, block.id)
const messages = await this.buildMessages(ctx, filteredInputs, block.id)
const providerRequest = this.buildProviderRequest({
ctx,
providerId,
model,
messages,
inputs,
inputs: filteredInputs,
formattedTools,
responseFormat,
streaming: streamingConfig.shouldUseStreaming ?? false,
@@ -58,10 +65,10 @@ export class AgentBlockHandler implements BlockHandler {
providerRequest,
block,
responseFormat,
inputs
filteredInputs
)
await this.persistResponseToMemory(ctx, inputs, result, block.id)
await this.persistResponseToMemory(ctx, filteredInputs, result, block.id)
return result
}
@@ -115,6 +122,53 @@ export class AgentBlockHandler implements BlockHandler {
return undefined
}
private async filterUnavailableMcpTools(
ctx: ExecutionContext,
tools: ToolInput[]
): Promise<ToolInput[]> {
if (!Array.isArray(tools) || tools.length === 0) return tools
const mcpTools = tools.filter((t) => t.type === 'mcp')
if (mcpTools.length === 0) return tools
const serverIds = [...new Set(mcpTools.map((t) => t.params?.serverId).filter(Boolean))]
if (serverIds.length === 0) return tools
const availableServerIds = new Set<string>()
if (ctx.workspaceId && serverIds.length > 0) {
try {
const servers = await db
.select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus })
.from(mcpServers)
.where(
and(
eq(mcpServers.workspaceId, ctx.workspaceId),
inArray(mcpServers.id, serverIds),
isNull(mcpServers.deletedAt)
)
)
for (const server of servers) {
if (server.connectionStatus === 'connected') {
availableServerIds.add(server.id)
}
}
} catch (error) {
logger.warn('Failed to check MCP server availability, including all tools:', error)
for (const serverId of serverIds) {
availableServerIds.add(serverId)
}
}
}
return tools.filter((tool) => {
if (tool.type !== 'mcp') return true
const serverId = tool.params?.serverId
if (!serverId) return false
return availableServerIds.has(serverId)
})
}
private async formatTools(ctx: ExecutionContext, inputTools: ToolInput[]): Promise<any[]> {
if (!Array.isArray(inputTools)) return []
@@ -304,6 +358,7 @@ export class AgentBlockHandler implements BlockHandler {
/**
* Process MCP tools using cached schemas from build time.
* Note: Unavailable tools are already filtered by filterUnavailableMcpTools.
*/
private async processMcpToolsBatched(
ctx: ExecutionContext,
@@ -312,7 +367,6 @@ export class AgentBlockHandler implements BlockHandler {
if (mcpTools.length === 0) return []
const results: any[] = []
const toolsWithSchema: ToolInput[] = []
const toolsNeedingDiscovery: ToolInput[] = []
@@ -439,7 +493,7 @@ export class AgentBlockHandler implements BlockHandler {
const discoveredTools = await this.discoverMcpToolsForServer(ctx, serverId)
return { serverId, tools, discoveredTools, error: null as Error | null }
} catch (error) {
logger.error(`Failed to discover tools from server ${serverId}:`, error)
logger.error(`Failed to discover tools from server ${serverId}:`)
return { serverId, tools, discoveredTools: [] as any[], error: error as Error }
}
})
@@ -829,6 +883,8 @@ export class AgentBlockHandler implements BlockHandler {
apiKey: inputs.apiKey,
azureEndpoint: inputs.azureEndpoint,
azureApiVersion: inputs.azureApiVersion,
vertexProject: inputs.vertexProject,
vertexLocation: inputs.vertexLocation,
responseFormat,
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
@@ -921,6 +977,8 @@ export class AgentBlockHandler implements BlockHandler {
apiKey: finalApiKey,
azureEndpoint: providerRequest.azureEndpoint,
azureApiVersion: providerRequest.azureApiVersion,
vertexProject: providerRequest.vertexProject,
vertexLocation: providerRequest.vertexLocation,
responseFormat: providerRequest.responseFormat,
workflowId: providerRequest.workflowId,
workspaceId: providerRequest.workspaceId,
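`filterUnavailableMcpTools` here and `filterUnavailableMcpToolsForLog` in the block executor run the same availability query. A hedged sketch of that query pulled out into a shared helper; the helper itself is hypothetical, but the drizzle query is the one used in both diffs:

```typescript
import { db } from '@sim/db'
import { mcpServers } from '@sim/db/schema'
import { and, eq, inArray, isNull } from 'drizzle-orm'

// Hypothetical shared helper: returns the ids of non-deleted servers in the workspace
// whose connectionStatus is 'connected'.
async function connectedServerIds(workspaceId: string, serverIds: string[]): Promise<Set<string>> {
  if (serverIds.length === 0) return new Set()
  const servers = await db
    .select({ id: mcpServers.id, connectionStatus: mcpServers.connectionStatus })
    .from(mcpServers)
    .where(
      and(
        eq(mcpServers.workspaceId, workspaceId),
        inArray(mcpServers.id, serverIds),
        isNull(mcpServers.deletedAt)
      )
    )
  return new Set(servers.filter((s) => s.connectionStatus === 'connected').map((s) => s.id))
}
```

Note that the two call sites differ on query failure: the executor's logging filter returns the inputs unchanged, while the agent handler falls back to treating every referenced server as available.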

View File

@@ -19,6 +19,8 @@ export interface AgentInputs {
apiKey?: string
azureEndpoint?: string
azureApiVersion?: string
vertexProject?: string
vertexLocation?: string
reasoningEffort?: string
verbosity?: string
}

View File

@@ -1,11 +1,52 @@
import '@/executor/__test-utils__/mock-dependencies'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { BlockType } from '@/executor/constants'
import { ConditionBlockHandler } from '@/executor/handlers/condition/condition-handler'
import type { BlockState, ExecutionContext } from '@/executor/types'
import type { SerializedBlock, SerializedWorkflow } from '@/serializer/types'
vi.mock('@/lib/logs/console/logger', () => ({
createLogger: vi.fn(() => ({
info: vi.fn(),
error: vi.fn(),
warn: vi.fn(),
debug: vi.fn(),
})),
}))
vi.mock('@/lib/core/utils/request', () => ({
generateRequestId: vi.fn(() => 'test-request-id'),
}))
vi.mock('@/tools', () => ({
executeTool: vi.fn(),
}))
import { executeTool } from '@/tools'
const mockExecuteTool = executeTool as ReturnType<typeof vi.fn>
/**
* Simulates what the function_execute tool does when evaluating condition code
*/
function simulateConditionExecution(code: string): {
success: boolean
output?: { result: unknown }
error?: string
} {
try {
// The code is in format: "const context = {...};\nreturn Boolean(...)"
// We need to execute it and return the result
const fn = new Function(code)
const result = fn()
return { success: true, output: { result } }
} catch (error: any) {
return {
success: false,
error: error.message,
}
}
}
describe('ConditionBlockHandler', () => {
let handler: ConditionBlockHandler
let mockBlock: SerializedBlock
@@ -18,7 +59,6 @@ describe('ConditionBlockHandler', () => {
let mockPathTracker: any
beforeEach(() => {
// Define blocks first
mockSourceBlock = {
id: 'source-block-1',
metadata: { id: 'source', name: 'Source Block' },
@@ -33,7 +73,7 @@ describe('ConditionBlockHandler', () => {
metadata: { id: BlockType.CONDITION, name: 'Test Condition' },
position: { x: 50, y: 50 },
config: { tool: BlockType.CONDITION, params: {} },
inputs: { conditions: 'json' }, // Corrected based on previous step
inputs: { conditions: 'json' },
outputs: {},
enabled: true,
}
@@ -56,7 +96,6 @@ describe('ConditionBlockHandler', () => {
enabled: true,
}
// Then define workflow using the block objects
mockWorkflow = {
blocks: [mockSourceBlock, mockBlock, mockTargetBlock1, mockTargetBlock2],
connections: [
@@ -84,7 +123,6 @@ describe('ConditionBlockHandler', () => {
handler = new ConditionBlockHandler(mockPathTracker, mockResolver)
// Define mock context *after* workflow and blocks are set up
mockContext = {
workflowId: 'test-workflow-id',
blockStates: new Map<string, BlockState>([
@@ -99,7 +137,7 @@ describe('ConditionBlockHandler', () => {
]),
blockLogs: [],
metadata: { duration: 0 },
environmentVariables: {}, // Now set the context's env vars
environmentVariables: {},
decisions: { router: new Map(), condition: new Map() },
loopExecutions: new Map(),
executedBlocks: new Set([mockSourceBlock.id]),
@@ -108,11 +146,11 @@ describe('ConditionBlockHandler', () => {
completedLoops: new Set(),
}
// Reset mocks using vi
vi.clearAllMocks()
// Default mock implementations - Removed as it's in the shared mock now
// mockResolver.resolveBlockReferences.mockImplementation((value) => value)
mockExecuteTool.mockImplementation(async (_toolId: string, params: { code: string }) => {
return simulateConditionExecution(params.code)
})
})
it('should handle condition blocks', () => {
@@ -137,11 +175,9 @@ describe('ConditionBlockHandler', () => {
blockType: 'target',
blockTitle: 'Target Block 1',
},
selectedConditionId: 'cond1',
selectedOption: 'cond1',
}
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.value > 5')
mockResolver.resolveBlockReferences.mockReturnValue('context.value > 5')
mockResolver.resolveEnvVariables.mockReturnValue('context.value > 5')
@@ -178,11 +214,9 @@ describe('ConditionBlockHandler', () => {
blockType: 'target',
blockTitle: 'Target Block 2',
},
selectedConditionId: 'else1',
selectedOption: 'else1',
}
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.value < 0')
mockResolver.resolveBlockReferences.mockReturnValue('context.value < 0')
mockResolver.resolveEnvVariables.mockReturnValue('context.value < 0')
@@ -207,7 +241,7 @@ describe('ConditionBlockHandler', () => {
const inputs = { conditions: '{ "invalid json ' }
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Invalid conditions format: Unterminated string.*/
/^Invalid conditions format:/
)
})
@@ -218,7 +252,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('{{source-block-1.value}} > 5')
mockResolver.resolveBlockReferences.mockReturnValue('10 > 5')
mockResolver.resolveEnvVariables.mockReturnValue('10 > 5')
@@ -245,7 +278,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline for variable resolution
mockResolver.resolveVariableReferences.mockReturnValue('"john" !== null')
mockResolver.resolveBlockReferences.mockReturnValue('"john" !== null')
mockResolver.resolveEnvVariables.mockReturnValue('"john" !== null')
@@ -272,7 +304,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline for env variable resolution
mockResolver.resolveVariableReferences.mockReturnValue('{{POOP}} === "hi"')
mockResolver.resolveBlockReferences.mockReturnValue('{{POOP}} === "hi"')
mockResolver.resolveEnvVariables.mockReturnValue('"hi" === "hi"')
@@ -300,7 +331,6 @@ describe('ConditionBlockHandler', () => {
const inputs = { conditions: JSON.stringify(conditions) }
const resolutionError = new Error('Could not resolve reference: invalid-ref')
// Mock the pipeline to throw at the variable resolution stage
mockResolver.resolveVariableReferences.mockImplementation(() => {
throw resolutionError
})
@@ -317,7 +347,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue(
'context.nonExistentProperty.doSomething()'
)
@@ -325,7 +354,7 @@ describe('ConditionBlockHandler', () => {
mockResolver.resolveEnvVariables.mockReturnValue('context.nonExistentProperty.doSomething()')
await expect(handler.execute(mockContext, mockBlock, inputs)).rejects.toThrow(
/^Evaluation error in condition "if": Evaluation error in condition: Cannot read properties of undefined \(reading 'doSomething'\)\. \(Resolved: context\.nonExistentProperty\.doSomething\(\)\)$/
/Evaluation error in condition "if".*doSomething/
)
})
@@ -333,7 +362,6 @@ describe('ConditionBlockHandler', () => {
const conditions = [{ id: 'cond1', title: 'if', value: 'true' }]
const inputs = { conditions: JSON.stringify(conditions) }
// Create a new context with empty blockStates instead of trying to delete from readonly map
const contextWithoutSource = {
...mockContext,
blockStates: new Map<string, BlockState>(),
@@ -346,7 +374,7 @@ describe('ConditionBlockHandler', () => {
const result = await handler.execute(contextWithoutSource, mockBlock, inputs)
expect(result).toHaveProperty('conditionResult', true)
expect(result).toHaveProperty('selectedConditionId', 'cond1')
expect(result).toHaveProperty('selectedOption', 'cond1')
})
it('should throw error if target block is missing', async () => {
@@ -355,7 +383,6 @@ describe('ConditionBlockHandler', () => {
mockContext.workflow!.blocks = [mockSourceBlock, mockBlock, mockTargetBlock2]
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('true')
mockResolver.resolveBlockReferences.mockReturnValue('true')
mockResolver.resolveEnvVariables.mockReturnValue('true')
@@ -381,7 +408,6 @@ describe('ConditionBlockHandler', () => {
},
]
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences
.mockReturnValueOnce('false')
.mockReturnValueOnce('context.value === 99')
@@ -394,12 +420,9 @@ describe('ConditionBlockHandler', () => {
const result = await handler.execute(mockContext, mockBlock, inputs)
// Should return success with no path selected (branch ends gracefully)
expect((result as any).conditionResult).toBe(false)
expect((result as any).selectedPath).toBeNull()
expect((result as any).selectedConditionId).toBeNull()
expect((result as any).selectedOption).toBeNull()
// Decision should not be set when no condition matches
expect(mockContext.decisions.condition.has(mockBlock.id)).toBe(false)
})
@@ -410,7 +433,6 @@ describe('ConditionBlockHandler', () => {
]
const inputs = { conditions: JSON.stringify(conditions) }
// Mock the full resolution pipeline
mockResolver.resolveVariableReferences.mockReturnValue('context.item === "apple"')
mockResolver.resolveBlockReferences.mockReturnValue('context.item === "apple"')
mockResolver.resolveEnvVariables.mockReturnValue('context.item === "apple"')
@@ -418,6 +440,6 @@ describe('ConditionBlockHandler', () => {
const result = await handler.execute(mockContext, mockBlock, inputs)
expect(mockContext.decisions.condition.get(mockBlock.id)).toBe('else1')
expect((result as any).selectedConditionId).toBe('else1')
expect((result as any).selectedOption).toBe('else1')
})
})

View File

@@ -3,9 +3,12 @@ import type { BlockOutput } from '@/blocks/types'
import { BlockType, CONDITION, DEFAULTS, EDGE } from '@/executor/constants'
import type { BlockHandler, ExecutionContext } from '@/executor/types'
import type { SerializedBlock } from '@/serializer/types'
import { executeTool } from '@/tools'
const logger = createLogger('ConditionBlockHandler')
const CONDITION_TIMEOUT_MS = 5000
/**
* Evaluates a single condition expression with variable/block reference resolution
* Returns true if condition is met, false otherwise
@@ -35,11 +38,38 @@ export async function evaluateConditionExpression(
}
try {
const conditionMet = new Function(
'context',
`with(context) { return ${resolvedConditionValue} }`
)(evalContext)
return Boolean(conditionMet)
const contextSetup = `const context = ${JSON.stringify(evalContext)};`
const code = `${contextSetup}\nreturn Boolean(${resolvedConditionValue})`
const result = await executeTool(
'function_execute',
{
code,
timeout: CONDITION_TIMEOUT_MS,
envVars: {},
_context: {
workflowId: ctx.workflowId,
workspaceId: ctx.workspaceId,
},
},
false,
false,
ctx
)
if (!result.success) {
logger.error(`Failed to evaluate condition: ${result.error}`, {
originalCondition: conditionExpression,
resolvedCondition: resolvedConditionValue,
evalContext,
error: result.error,
})
throw new Error(
`Evaluation error in condition: ${result.error}. (Resolved: ${resolvedConditionValue})`
)
}
return Boolean(result.output?.result)
} catch (evalError: any) {
logger.error(`Failed to evaluate condition: ${evalError.message}`, {
originalCondition: conditionExpression,
@@ -87,13 +117,11 @@ export class ConditionBlockHandler implements BlockHandler {
block
)
// Handle case where no condition matched and no else exists - branch ends gracefully
if (!selectedConnection || !selectedCondition) {
return {
...((sourceOutput as any) || {}),
conditionResult: false,
selectedPath: null,
selectedConditionId: null,
selectedOption: null,
}
}
@@ -115,7 +143,6 @@ export class ConditionBlockHandler implements BlockHandler {
blockTitle: targetBlock.metadata?.name || DEFAULTS.BLOCK_TITLE,
},
selectedOption: selectedCondition.id,
selectedConditionId: selectedCondition.id,
}
}
@@ -206,14 +233,12 @@ export class ConditionBlockHandler implements BlockHandler {
if (elseConnection) {
return { selectedConnection: elseConnection, selectedCondition: elseCondition }
}
// Else exists but has no connection - treat as no match, branch ends
logger.info(`No condition matched and else has no connection - branch ending`, {
blockId: block.id,
})
return { selectedConnection: null, selectedCondition: null }
}
// No condition matched and no else exists - branch ends gracefully
logger.info(`No condition matched and no else block - branch ending`, { blockId: block.id })
return { selectedConnection: null, selectedCondition: null }
}
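With this change a condition is no longer evaluated via `new Function` in-process; the handler serializes the eval context and runs the expression through the `function_execute` tool. An illustrative example of the generated snippet, assuming a resolved condition of `context.value > 5` and an eval context of `{ value: 10 }`:

```typescript
// Illustrative inputs (the real values come from the resolver and upstream block output).
const evalContext = { value: 10 }
const resolvedConditionValue = 'context.value > 5'

const contextSetup = `const context = ${JSON.stringify(evalContext)};`
const code = `${contextSetup}\nreturn Boolean(${resolvedConditionValue})`
// code === 'const context = {"value":10};\nreturn Boolean(context.value > 5)'

// executeTool('function_execute', { code, timeout: 5000, ... }) runs this sandboxed and
// resolves to { success: true, output: { result: true } }, which the handler coerces to a boolean.
```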

View File

@@ -1,3 +1,5 @@
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'
import { createLogger } from '@/lib/logs/console/logger'
import { buildLoopIndexCondition, DEFAULTS, EDGE } from '@/executor/constants'
import type { DAG } from '@/executor/dag/builder'
@@ -17,6 +19,8 @@ import type { SerializedLoop } from '@/serializer/types'
const logger = createLogger('LoopOrchestrator')
const LOOP_CONDITION_TIMEOUT_MS = 5000
export type LoopRoute = typeof EDGE.LOOP_CONTINUE | typeof EDGE.LOOP_EXIT
export interface LoopContinuationResult {
@@ -112,7 +116,10 @@ export class LoopOrchestrator {
scope.currentIterationOutputs.set(baseId, output)
}
evaluateLoopContinuation(ctx: ExecutionContext, loopId: string): LoopContinuationResult {
async evaluateLoopContinuation(
ctx: ExecutionContext,
loopId: string
): Promise<LoopContinuationResult> {
const scope = ctx.loopExecutions?.get(loopId)
if (!scope) {
logger.error('Loop scope not found during continuation evaluation', { loopId })
@@ -123,7 +130,6 @@ export class LoopOrchestrator {
}
}
// Check for cancellation
if (ctx.isCancelled) {
logger.info('Loop execution cancelled', { loopId, iteration: scope.iteration })
return this.createExitResult(ctx, loopId, scope)
@@ -140,7 +146,7 @@ export class LoopOrchestrator {
scope.currentIterationOutputs.clear()
if (!this.evaluateCondition(ctx, scope, scope.iteration + 1)) {
if (!(await this.evaluateCondition(ctx, scope, scope.iteration + 1))) {
return this.createExitResult(ctx, loopId, scope)
}
@@ -173,7 +179,11 @@ export class LoopOrchestrator {
}
}
private evaluateCondition(ctx: ExecutionContext, scope: LoopScope, iteration?: number): boolean {
private async evaluateCondition(
ctx: ExecutionContext,
scope: LoopScope,
iteration?: number
): Promise<boolean> {
if (!scope.condition) {
logger.warn('No condition defined for loop')
return false
@@ -184,7 +194,7 @@ export class LoopOrchestrator {
scope.iteration = iteration
}
const result = this.evaluateWhileCondition(ctx, scope.condition, scope)
const result = await this.evaluateWhileCondition(ctx, scope.condition, scope)
if (iteration !== undefined) {
scope.iteration = currentIteration
@@ -223,7 +233,6 @@ export class LoopOrchestrator {
const loopNodes = loopConfig.nodes
const allLoopNodeIds = new Set([sentinelStartId, sentinelEndId, ...loopNodes])
// Clear deactivated edges for loop nodes so error/success edges can be re-evaluated
if (this.edgeManager) {
this.edgeManager.clearDeactivatedEdgesForNodes(allLoopNodeIds)
}
@@ -263,7 +272,7 @@ export class LoopOrchestrator {
*
* @returns true if the loop should execute, false if it should be skipped
*/
evaluateInitialCondition(ctx: ExecutionContext, loopId: string): boolean {
async evaluateInitialCondition(ctx: ExecutionContext, loopId: string): Promise<boolean> {
const scope = ctx.loopExecutions?.get(loopId)
if (!scope) {
logger.warn('Loop scope not found for initial condition evaluation', { loopId })
@@ -300,7 +309,7 @@ export class LoopOrchestrator {
return false
}
const result = this.evaluateWhileCondition(ctx, scope.condition, scope)
const result = await this.evaluateWhileCondition(ctx, scope.condition, scope)
logger.info('While loop initial condition evaluation', {
loopId,
condition: scope.condition,
@@ -327,11 +336,11 @@ export class LoopOrchestrator {
return undefined
}
private evaluateWhileCondition(
private async evaluateWhileCondition(
ctx: ExecutionContext,
condition: string,
scope: LoopScope
): boolean {
): Promise<boolean> {
if (!condition) {
return false
}
@@ -343,7 +352,6 @@ export class LoopOrchestrator {
workflowVariables: ctx.workflowVariables,
})
// Use generic utility for smart variable reference replacement
const evaluatedCondition = replaceValidReferences(condition, (match) => {
const resolved = this.resolver.resolveSingleReference(ctx, '', match, scope)
logger.info('Resolved variable reference in loop condition', {
@@ -352,11 +360,9 @@ export class LoopOrchestrator {
resolvedType: typeof resolved,
})
if (resolved !== undefined) {
// For booleans and numbers, return as-is (no quotes)
if (typeof resolved === 'boolean' || typeof resolved === 'number') {
return String(resolved)
}
// For strings that represent booleans, return without quotes
if (typeof resolved === 'string') {
const lower = resolved.toLowerCase().trim()
if (lower === 'true' || lower === 'false') {
@@ -364,13 +370,33 @@ export class LoopOrchestrator {
}
return `"${resolved}"`
}
// For other types, stringify them
return JSON.stringify(resolved)
}
return match
})
const result = Boolean(new Function(`return (${evaluatedCondition})`)())
const requestId = generateRequestId()
const code = `return Boolean(${evaluatedCondition})`
const vmResult = await executeInIsolatedVM({
code,
params: {},
envVars: {},
contextVariables: {},
timeoutMs: LOOP_CONDITION_TIMEOUT_MS,
requestId,
})
if (vmResult.error) {
logger.error('Failed to evaluate loop condition', {
condition,
evaluatedCondition,
error: vmResult.error,
})
return false
}
const result = Boolean(vmResult.result)
logger.info('Loop condition evaluation result', {
originalCondition: condition,
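Loop conditions follow the same pattern: references are resolved into the expression, then the expression runs inside the isolated VM instead of a local `new Function`. A sketch of the call as it would appear inside the now-async `evaluateWhileCondition`; the resolved expression shown is illustrative:

```typescript
import { generateRequestId } from '@/lib/core/utils/request'
import { executeInIsolatedVM } from '@/lib/execution/isolated-vm'

// e.g. a loop-index reference already resolved to a number by replaceValidReferences
const evaluatedCondition = '2 < 3'

const vmResult = await executeInIsolatedVM({
  code: `return Boolean(${evaluatedCondition})`,
  params: {},
  envVars: {},
  contextVariables: {},
  timeoutMs: 5000, // LOOP_CONDITION_TIMEOUT_MS
  requestId: generateRequestId(),
})

// A truthy result keeps the loop running; any vmResult.error is logged and treated as false,
// so a bad condition exits the loop instead of throwing.
const shouldContinue = !vmResult.error && Boolean(vmResult.result)
```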

View File

@@ -68,7 +68,7 @@ export class NodeExecutionOrchestrator {
}
if (node.metadata.isSentinel) {
const output = this.handleSentinel(ctx, node)
const output = await this.handleSentinel(ctx, node)
const isFinalOutput = node.outgoingEdges.size === 0
return {
nodeId,
@@ -86,14 +86,17 @@ export class NodeExecutionOrchestrator {
}
}
private handleSentinel(ctx: ExecutionContext, node: DAGNode): NormalizedBlockOutput {
private async handleSentinel(
ctx: ExecutionContext,
node: DAGNode
): Promise<NormalizedBlockOutput> {
const sentinelType = node.metadata.sentinelType
const loopId = node.metadata.loopId
switch (sentinelType) {
case 'start': {
if (loopId) {
const shouldExecute = this.loopOrchestrator.evaluateInitialCondition(ctx, loopId)
const shouldExecute = await this.loopOrchestrator.evaluateInitialCondition(ctx, loopId)
if (!shouldExecute) {
logger.info('While loop initial condition false, skipping loop body', { loopId })
return {
@@ -112,7 +115,7 @@ export class NodeExecutionOrchestrator {
return { shouldExit: true, selectedRoute: EDGE.LOOP_EXIT }
}
const continuationResult = this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId)
const continuationResult = await this.loopOrchestrator.evaluateLoopContinuation(ctx, loopId)
if (continuationResult.shouldContinue) {
return {

View File

@@ -83,7 +83,7 @@ export interface NormalizedBlockOutput {
blockType?: string
blockTitle?: string
}
selectedConditionId?: string
selectedOption?: string
conditionResult?: boolean
result?: any
stdout?: string

View File

@@ -1,20 +1,17 @@
import { keepPreviousData, useMutation, useQuery, useQueryClient } from '@tanstack/react-query'
import { createLogger } from '@/lib/logs/console/logger'
import type { McpServerStatusConfig } from '@/lib/mcp/types'
const logger = createLogger('McpQueries')
/**
* Query key factories for MCP-related queries
*/
export type { McpServerStatusConfig }
export const mcpKeys = {
all: ['mcp'] as const,
servers: (workspaceId: string) => [...mcpKeys.all, 'servers', workspaceId] as const,
tools: (workspaceId: string) => [...mcpKeys.all, 'tools', workspaceId] as const,
}
/**
* MCP Server Types
*/
export interface McpServer {
id: string
workspaceId: string
@@ -25,9 +22,11 @@ export interface McpServer {
headers?: Record<string, string>
enabled: boolean
connectionStatus?: 'connected' | 'disconnected' | 'error'
lastError?: string
lastError?: string | null
statusConfig?: McpServerStatusConfig
toolCount?: number
lastToolsRefresh?: string
lastConnected?: string
createdAt: string
updatedAt: string
deletedAt?: string
@@ -86,8 +85,13 @@ export function useMcpServers(workspaceId: string) {
/**
* Fetch MCP tools for a workspace
*/
async function fetchMcpTools(workspaceId: string): Promise<McpTool[]> {
const response = await fetch(`/api/mcp/tools/discover?workspaceId=${workspaceId}`)
async function fetchMcpTools(workspaceId: string, forceRefresh = false): Promise<McpTool[]> {
const params = new URLSearchParams({ workspaceId })
if (forceRefresh) {
params.set('refresh', 'true')
}
const response = await fetch(`/api/mcp/tools/discover?${params.toString()}`)
// Treat 404 as "no tools available" - return empty array
if (response.status === 404) {
@@ -159,14 +163,43 @@ export function useCreateMcpServer() {
return {
...serverData,
id: serverId,
connectionStatus: 'disconnected' as const,
connectionStatus: 'connected' as const,
serverId,
updated: wasUpdated,
}
},
onSuccess: (_data, variables) => {
onSuccess: async (data, variables) => {
const freshTools = await fetchMcpTools(variables.workspaceId, true)
const previousServers = queryClient.getQueryData<McpServer[]>(
mcpKeys.servers(variables.workspaceId)
)
if (previousServers) {
const newServer: McpServer = {
id: data.id,
workspaceId: variables.workspaceId,
name: variables.config.name,
transport: variables.config.transport,
url: variables.config.url,
timeout: variables.config.timeout || 30000,
headers: variables.config.headers,
enabled: variables.config.enabled,
connectionStatus: 'connected',
createdAt: new Date().toISOString(),
updatedAt: new Date().toISOString(),
}
const serverExists = previousServers.some((s) => s.id === data.id)
queryClient.setQueryData<McpServer[]>(
mcpKeys.servers(variables.workspaceId),
serverExists
? previousServers.map((s) => (s.id === data.id ? { ...s, ...newServer } : s))
: [...previousServers, newServer]
)
}
queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools)
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
},
})
}
@@ -213,7 +246,7 @@ export function useDeleteMcpServer() {
interface UpdateMcpServerParams {
workspaceId: string
serverId: string
updates: Partial<McpServerConfig>
updates: Partial<McpServerConfig & { enabled?: boolean }>
}
export function useUpdateMcpServer() {
@@ -221,8 +254,20 @@ export function useUpdateMcpServer() {
return useMutation({
mutationFn: async ({ workspaceId, serverId, updates }: UpdateMcpServerParams) => {
const response = await fetch(`/api/mcp/servers/${serverId}?workspaceId=${workspaceId}`, {
method: 'PATCH',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(updates),
})
const data = await response.json()
if (!response.ok) {
throw new Error(data.error || 'Failed to update MCP server')
}
logger.info(`Updated MCP server: ${serverId} in workspace: ${workspaceId}`)
return { serverId, updates }
return data.data?.server
},
onMutate: async ({ workspaceId, serverId, updates }) => {
await queryClient.cancelQueries({ queryKey: mcpKeys.servers(workspaceId) })
@@ -249,6 +294,7 @@ export function useUpdateMcpServer() {
},
onSettled: (_data, _error, variables) => {
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
},
})
}
@@ -292,9 +338,10 @@ export function useRefreshMcpServer() {
logger.info(`Refreshed MCP server: ${serverId}`)
return data.data
},
onSuccess: (_data, variables) => {
onSuccess: async (_data, variables) => {
queryClient.invalidateQueries({ queryKey: mcpKeys.servers(variables.workspaceId) })
queryClient.invalidateQueries({ queryKey: mcpKeys.tools(variables.workspaceId) })
const freshTools = await fetchMcpTools(variables.workspaceId, true)
queryClient.setQueryData(mcpKeys.tools(variables.workspaceId), freshTools)
},
})
}
@@ -349,3 +396,42 @@ export function useTestMcpServer() {
},
})
}
/**
* Stored MCP tool from workflow state
*/
export interface StoredMcpTool {
workflowId: string
workflowName: string
serverId: string
serverUrl?: string
toolName: string
schema?: Record<string, unknown>
}
/**
* Fetch stored MCP tools from all workflows in the workspace
*/
async function fetchStoredMcpTools(workspaceId: string): Promise<StoredMcpTool[]> {
const response = await fetch(`/api/mcp/tools/stored?workspaceId=${workspaceId}`)
if (!response.ok) {
const data = await response.json().catch(() => ({}))
throw new Error(data.error || 'Failed to fetch stored MCP tools')
}
const data = await response.json()
return data.data?.tools || []
}
/**
* Hook to fetch stored MCP tools from all workflows
*/
export function useStoredMcpTools(workspaceId: string) {
return useQuery({
queryKey: [...mcpKeys.all, workspaceId, 'stored'],
queryFn: () => fetchStoredMcpTools(workspaceId),
enabled: !!workspaceId,
staleTime: 60 * 1000, // 1 minute - workflows don't change frequently
})
}
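A hedged usage sketch for the new `useStoredMcpTools` hook; the wrapper hook and grouping are illustrative, only `StoredMcpTool` and the hook itself come from the diff:

```typescript
// Hypothetical consumer: group stored MCP tool names by the workflow they were found in.
function useMcpToolNamesByWorkflow(workspaceId: string): Record<string, string[]> {
  const { data: tools = [] } = useStoredMcpTools(workspaceId)
  const grouped: Record<string, string[]> = {}
  for (const tool of tools) {
    ;(grouped[tool.workflowName] ??= []).push(tool.toolName)
  }
  return grouped
}
```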

View File

@@ -28,7 +28,7 @@ export interface ServiceInfo extends OAuthServiceConfig {
function defineServices(): ServiceInfo[] {
const servicesList: ServiceInfo[] = []
Object.values(OAUTH_PROVIDERS).forEach((provider) => {
Object.entries(OAUTH_PROVIDERS).forEach(([_providerKey, provider]) => {
Object.values(provider.services).forEach((service) => {
servicesList.push({
...service,

View File

@@ -1540,7 +1540,7 @@ export function useCollaborativeWorkflow() {
const config = {
id: nodeId,
nodes: childNodes,
iterations: Math.max(1, Math.min(100, count)), // Clamp between 1-100 for loops
iterations: Math.max(1, Math.min(1000, count)), // Clamp between 1-1000 for loops
loopType: currentLoopType,
forEachItems: currentCollection,
}

View File

@@ -34,14 +34,19 @@ export function useMcpServerTest() {
const [isTestingConnection, setIsTestingConnection] = useState(false)
const testConnection = useCallback(
async (config: McpServerTestConfig): Promise<McpServerTestResult> => {
async (
config: McpServerTestConfig,
options?: { silent?: boolean }
): Promise<McpServerTestResult> => {
const { silent = false } = options || {}
if (!config.name || !config.transport || !config.workspaceId) {
const result: McpServerTestResult = {
success: false,
message: 'Missing required configuration',
error: 'Please provide server name, transport method, and workspace ID',
}
setTestResult(result)
if (!silent) setTestResult(result)
return result
}
@@ -51,12 +56,14 @@ export function useMcpServerTest() {
message: 'Missing server URL',
error: 'Please provide a server URL for HTTP/SSE transport',
}
setTestResult(result)
if (!silent) setTestResult(result)
return result
}
setIsTestingConnection(true)
setTestResult(null)
if (!silent) {
setIsTestingConnection(true)
setTestResult(null)
}
try {
const cleanConfig = {
@@ -88,14 +95,14 @@ export function useMcpServerTest() {
error: result.data.error,
warnings: result.data.warnings,
}
setTestResult(testResult)
if (!silent) setTestResult(testResult)
logger.error('MCP server test failed:', result.data.error)
return testResult
}
throw new Error(result.error || 'Connection test failed')
}
setTestResult(result.data || result)
if (!silent) setTestResult(result.data || result)
logger.info(`MCP server test ${result.data?.success ? 'passed' : 'failed'}:`, config.name)
return result.data || result
} catch (error) {
@@ -105,11 +112,11 @@ export function useMcpServerTest() {
message: 'Connection failed',
error: errorMessage,
}
setTestResult(result)
if (!silent) setTestResult(result)
logger.error('MCP server test failed:', errorMessage)
return result
} finally {
setIsTestingConnection(false)
if (!silent) setIsTestingConnection(false)
}
},
[]

View File

@@ -4,6 +4,7 @@ interface SlackAccount {
id: string
accountId: string
providerId: string
displayName?: string
}
interface UseSlackAccountsResult {

View File

@@ -14,6 +14,7 @@ interface UseWebhookManagementProps {
blockId: string
triggerId?: string
isPreview?: boolean
useWebhookUrl?: boolean
}
interface WebhookManagementState {
@@ -90,6 +91,7 @@ export function useWebhookManagement({
blockId,
triggerId,
isPreview = false,
useWebhookUrl = false,
}: UseWebhookManagementProps): WebhookManagementState {
const params = useParams()
const workflowId = params.workflowId as string
@@ -204,9 +206,10 @@ export function useWebhookManagement({
})
}
}
loadWebhookOrGenerateUrl()
}, [isPreview, triggerId, workflowId, blockId])
if (useWebhookUrl) {
loadWebhookOrGenerateUrl()
}
}, [isPreview, triggerId, workflowId, blockId, useWebhookUrl])
const createWebhook = async (
effectiveTriggerId: string | undefined,

View File

@@ -110,28 +110,20 @@ export const auth = betterAuth({
account: {
create: {
before: async (account) => {
// Only one credential per (userId, providerId) is allowed
// If user reconnects (even with a different external account), replace the existing one
const existing = await db.query.account.findFirst({
where: and(
eq(schema.account.userId, account.userId),
eq(schema.account.providerId, account.providerId),
eq(schema.account.accountId, account.accountId)
eq(schema.account.providerId, account.providerId)
),
})
if (existing) {
logger.warn(
'[databaseHooks.account.create.before] Duplicate account detected, updating existing',
{
existingId: existing.id,
userId: account.userId,
providerId: account.providerId,
accountId: account.accountId,
}
)
await db
.update(schema.account)
.set({
accountId: account.accountId,
accessToken: account.accessToken,
refreshToken: account.refreshToken,
idToken: account.idToken,
@@ -733,17 +725,17 @@ export const auth = betterAuth({
scopes: ['login', 'data'],
responseType: 'code',
redirectURI: `${getBaseUrl()}/api/auth/oauth2/callback/wealthbox`,
getUserInfo: async (tokens) => {
getUserInfo: async (_tokens) => {
try {
logger.info('Creating Wealthbox user profile from token data')
const uniqueId = `wealthbox-${Date.now()}`
const uniqueId = 'wealthbox-user'
const now = new Date()
return {
id: uniqueId,
name: 'Wealthbox User',
email: `${uniqueId.replace(/[^a-zA-Z0-9]/g, '')}@wealthbox.user`,
email: `${uniqueId}@wealthbox.user`,
emailVerified: false,
createdAt: now,
updatedAt: now,
@@ -1655,33 +1647,42 @@ export const auth = betterAuth({
redirectURI: `${getBaseUrl()}/api/auth/oauth2/callback/slack`,
getUserInfo: async (tokens) => {
try {
logger.info('Creating Slack bot profile from token data')
const response = await fetch('https://slack.com/api/auth.test', {
headers: {
Authorization: `Bearer ${tokens.accessToken}`,
},
})
// Extract user identifier from tokens if possible
let userId = 'slack-bot'
if (tokens.idToken) {
try {
const decodedToken = JSON.parse(
Buffer.from(tokens.idToken.split('.')[1], 'base64').toString()
)
if (decodedToken.sub) {
userId = decodedToken.sub
}
} catch (e) {
logger.warn('Failed to decode Slack ID token', { error: e })
}
if (!response.ok) {
logger.error('Slack auth.test failed', {
status: response.status,
statusText: response.statusText,
})
return null
}
const uniqueId = `${userId}-${Date.now()}`
const now = new Date()
const data = await response.json()
if (!data.ok) {
logger.error('Slack auth.test returned error', { error: data.error })
return null
}
const teamId = data.team_id || 'unknown'
const userId = data.user_id || data.bot_id || 'bot'
const teamName = data.team || 'Slack Workspace'
const uniqueId = `${teamId}-${userId}`
logger.info('Slack credential identifier', { teamId, userId, uniqueId, teamName })
return {
id: uniqueId,
name: 'Slack Bot',
email: `${uniqueId.replace(/[^a-zA-Z0-9]/g, '')}@slack.bot`,
name: teamName,
email: `${teamId}-${userId}@slack.bot`,
emailVerified: false,
createdAt: now,
updatedAt: now,
createdAt: new Date(),
updatedAt: new Date(),
}
} catch (error) {
logger.error('Error creating Slack bot profile:', { error })
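The Slack credential now derives a stable identifier from `auth.test` instead of a timestamp, so reconnecting the same workspace maps back to the same account. An illustrative response and the identifier it yields (field names follow the Slack Web API; values are made up):

```typescript
const data = {
  ok: true,
  team: 'Acme Inc',
  team_id: 'T0123456',
  user_id: 'U0456789',
  bot_id: 'B0789012',
}

const teamId = data.team_id || 'unknown'
const userId = data.user_id || data.bot_id || 'bot'
const uniqueId = `${teamId}-${userId}`          // 'T0123456-U0456789'
const email = `${teamId}-${userId}@slack.bot`   // stable across reconnects, unlike the old Date.now() suffix
```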
@@ -1722,7 +1723,7 @@ export const auth = betterAuth({
const data = await response.json()
const now = new Date()
const userId = data.user_id || `webflow-${Date.now()}`
const userId = data.user_id || 'user'
const uniqueId = `webflow-${userId}`
return {

View File

@@ -1,5 +1,7 @@
'use client'
import { useState } from 'react'
import { Check, Copy } from 'lucide-react'
import { Code } from '@/components/emcn'
interface CodeBlockProps {
@@ -8,5 +10,36 @@ interface CodeBlockProps {
}
export function CodeBlock({ code, language }: CodeBlockProps) {
return <Code.Viewer code={code} showGutter={true} language={language} />
const [copied, setCopied] = useState(false)
const handleCopy = () => {
navigator.clipboard.writeText(code)
setCopied(true)
setTimeout(() => setCopied(false), 2000)
}
return (
<div className='dark w-full overflow-hidden rounded-md border border-[#2a2a2a] bg-[#1F1F1F] text-sm'>
<div className='flex items-center justify-between border-[#2a2a2a] border-b px-4 py-1.5'>
<span className='text-[#A3A3A3] text-xs'>{language}</span>
<button
onClick={handleCopy}
className='text-[#A3A3A3] transition-colors hover:text-gray-300'
title='Copy code'
>
{copied ? (
<Check className='h-3 w-3' strokeWidth={2} />
) : (
<Copy className='h-3 w-3' strokeWidth={2} />
)}
</button>
</div>
<Code.Viewer
code={code}
showGutter
language={language}
className='[&_pre]:!pb-0 m-0 rounded-none border-0 bg-transparent'
/>
</div>
)
}

Some files were not shown because too many files have changed in this diff