mirror of
https://github.com/simstudioai/sim.git
synced 2026-01-08 22:48:14 -05:00
feat(wand): subblock level wand configuration + migrate old wand usage to this (#829)
* feat(wand): subblock level wand configuration + migrate old wand usage to this * fix build issue * Update apps/sim/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar.tsx Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com> * remove optional param * remove unused test file * address greptile comments * change to enum for gen type * fix caching issue --------- Co-authored-by: greptile-apps[bot] <165735046+greptile-apps[bot]@users.noreply.github.com>
This commit is contained in:
committed by
GitHub
parent
12bb0b4589
commit
03607bbc8b
@@ -1,390 +0,0 @@
|
||||
/**
|
||||
* Tests for codegen API route
|
||||
*
|
||||
* @vitest-environment node
|
||||
*/
|
||||
import { afterEach, beforeEach, describe, expect, it, vi } from 'vitest'
|
||||
import { createMockRequest } from '@/app/api/__test-utils__/utils'
|
||||
|
||||
describe('Codegen API Route', () => {
|
||||
const mockOpenAI = {
|
||||
chat: {
|
||||
completions: {
|
||||
create: vi.fn(),
|
||||
},
|
||||
},
|
||||
}
|
||||
const mockLogger = {
|
||||
info: vi.fn(),
|
||||
warn: vi.fn(),
|
||||
error: vi.fn(),
|
||||
debug: vi.fn(),
|
||||
}
|
||||
const mockEnv = {
|
||||
OPENAI_API_KEY: 'test-api-key',
|
||||
}
|
||||
|
||||
const mockUUID = 'mock-uuid-12345678-90ab-cdef-1234-567890abcdef'
|
||||
|
||||
beforeEach(() => {
|
||||
vi.resetModules()
|
||||
mockEnv.OPENAI_API_KEY = 'test-api-key'
|
||||
|
||||
vi.stubGlobal('crypto', {
|
||||
randomUUID: vi.fn().mockReturnValue(mockUUID),
|
||||
})
|
||||
|
||||
const MockAPIError = class extends Error {
|
||||
status: number
|
||||
constructor(message: string, status?: number) {
|
||||
super(message)
|
||||
this.status = status || 500
|
||||
}
|
||||
}
|
||||
|
||||
vi.doMock('openai', () => ({
|
||||
default: vi.fn().mockImplementation(() => mockOpenAI),
|
||||
APIError: MockAPIError,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/env', () => ({
|
||||
env: mockEnv,
|
||||
}))
|
||||
|
||||
vi.doMock('@/lib/logs/console/logger', () => ({
|
||||
createLogger: vi.fn().mockReturnValue(mockLogger),
|
||||
}))
|
||||
|
||||
vi.doMock('next/cache', () => ({
|
||||
unstable_noStore: vi.fn(),
|
||||
}))
|
||||
})
|
||||
|
||||
afterEach(() => {
|
||||
vi.clearAllMocks()
|
||||
})
|
||||
|
||||
it('should generate JSON schema successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: JSON.stringify({
|
||||
name: 'test_function',
|
||||
description: 'A test function',
|
||||
strict: true,
|
||||
schema: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
input: { type: 'string', description: 'Test input' },
|
||||
},
|
||||
additionalProperties: false,
|
||||
required: ['input'],
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a function that takes a string input',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBeDefined()
|
||||
expect(() => JSON.parse(data.generatedContent)).not.toThrow()
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({
|
||||
role: 'user',
|
||||
content: 'Create a function that takes a string input',
|
||||
}),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: { type: 'json_object' },
|
||||
})
|
||||
})
|
||||
|
||||
it('should generate JavaScript function body successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'const input = <input>;\nreturn input.toUpperCase();',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Convert input to uppercase',
|
||||
generationType: 'javascript-function-body',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBe('const input = <input>;\nreturn input.toUpperCase();')
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({ role: 'user' }),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should generate custom tool schema successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: JSON.stringify({
|
||||
type: 'function',
|
||||
function: {
|
||||
name: 'testFunction',
|
||||
description: 'A test function',
|
||||
parameters: {
|
||||
type: 'object',
|
||||
properties: {
|
||||
input: { type: 'string', description: 'Test input' },
|
||||
},
|
||||
required: ['input'],
|
||||
additionalProperties: false,
|
||||
},
|
||||
},
|
||||
}),
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a custom tool for testing',
|
||||
generationType: 'custom-tool-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(data.success).toBe(true)
|
||||
expect(data.generatedContent).toBeDefined()
|
||||
})
|
||||
|
||||
it('should include context in the prompt', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'const result = <input>;\nreturn result;',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Modify this function',
|
||||
generationType: 'javascript-function-body',
|
||||
context: 'existing function code here',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({
|
||||
role: 'user',
|
||||
content:
|
||||
'Prompt: Modify this function\\n\\nExisting Content/Context:\\nexisting function code here',
|
||||
}),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should include conversation history', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'Updated function code',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Update the function',
|
||||
generationType: 'javascript-function-body',
|
||||
history: [
|
||||
{ role: 'user', content: 'Create a function' },
|
||||
{ role: 'assistant', content: 'function created' },
|
||||
],
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
|
||||
expect(response.status).toBe(200)
|
||||
expect(mockOpenAI.chat.completions.create).toHaveBeenCalledWith({
|
||||
model: 'gpt-4o',
|
||||
messages: expect.arrayContaining([
|
||||
expect.objectContaining({ role: 'system' }),
|
||||
expect.objectContaining({ role: 'user', content: 'Create a function' }),
|
||||
expect.objectContaining({ role: 'assistant', content: 'function created' }),
|
||||
expect.objectContaining({ role: 'user', content: 'Update the function' }),
|
||||
]),
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: undefined,
|
||||
})
|
||||
})
|
||||
|
||||
it('should handle missing OpenAI API key', async () => {
|
||||
mockEnv.OPENAI_API_KEY = ''
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(503)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Code generation service is not configured.')
|
||||
})
|
||||
|
||||
it('should handle missing required fields', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: '',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Missing required fields: prompt and generationType.')
|
||||
expect(mockLogger.warn).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle invalid generation type', async () => {
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'invalid-type',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(400)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Invalid generationType: invalid-type')
|
||||
expect(mockLogger.warn).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle empty OpenAI response', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: null,
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Test prompt',
|
||||
generationType: 'javascript-function-body',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Failed to generate content. OpenAI response was empty.')
|
||||
expect(mockLogger.error).toHaveBeenCalled()
|
||||
})
|
||||
|
||||
it('should handle invalid JSON schema generation', async () => {
|
||||
const mockResponse = {
|
||||
choices: [
|
||||
{
|
||||
message: {
|
||||
content: 'invalid json content',
|
||||
},
|
||||
},
|
||||
],
|
||||
}
|
||||
|
||||
mockOpenAI.chat.completions.create.mockResolvedValueOnce(mockResponse)
|
||||
|
||||
const req = createMockRequest('POST', {
|
||||
prompt: 'Create a schema',
|
||||
generationType: 'json-schema',
|
||||
})
|
||||
|
||||
const { POST } = await import('@/app/api/codegen/route')
|
||||
|
||||
const response = await POST(req)
|
||||
const data = await response.json()
|
||||
|
||||
expect(response.status).toBe(500)
|
||||
expect(data.success).toBe(false)
|
||||
expect(data.error).toBe('Generated JSON schema was invalid.')
|
||||
expect(mockLogger.error).toHaveBeenCalled()
|
||||
})
|
||||
})
|
||||
@@ -1,535 +0,0 @@
|
||||
import { unstable_noStore as noStore } from 'next/cache'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import OpenAI from 'openai'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'edge'
|
||||
export const maxDuration = 60
|
||||
|
||||
const logger = createLogger('GenerateCodeAPI')
|
||||
|
||||
const openai = env.OPENAI_API_KEY
|
||||
? new OpenAI({
|
||||
apiKey: env.OPENAI_API_KEY,
|
||||
})
|
||||
: null
|
||||
|
||||
if (!env.OPENAI_API_KEY) {
|
||||
logger.warn('OPENAI_API_KEY not found. Code generation API will not function.')
|
||||
}
|
||||
|
||||
type GenerationType =
|
||||
| 'json-schema'
|
||||
| 'javascript-function-body'
|
||||
| 'typescript-function-body'
|
||||
| 'custom-tool-schema'
|
||||
| 'json-object'
|
||||
|
||||
// Define the structure for a single message in the history
|
||||
interface ChatMessage {
|
||||
role: 'user' | 'assistant' | 'system' // System role might be needed if we include the initial system prompt in history
|
||||
content: string
|
||||
}
|
||||
|
||||
interface RequestBody {
|
||||
prompt: string
|
||||
generationType: GenerationType
|
||||
context?: string
|
||||
stream?: boolean
|
||||
history?: ChatMessage[] // Optional conversation history
|
||||
}
|
||||
|
||||
const systemPrompts: Record<GenerationType, string> = {
|
||||
'json-schema': `You are an expert programmer specializing in creating JSON schemas according to a specific format.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON object MUST have the following top-level properties: 'name' (string), 'description' (string), 'strict' (boolean, usually true), and 'schema' (object).
|
||||
The 'schema' object must define the structure and MUST contain 'type': 'object', 'properties': {...}, 'additionalProperties': false, and 'required': [...].
|
||||
Inside 'properties', use standard JSON Schema properties (type, description, enum, items for arrays, etc.).
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"name": "reddit_post",
|
||||
"description": "Fetches the reddit posts in the given subreddit",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "The title of the post"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "The content of the post"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [ "title", "content" ]
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["location", "unit"]
|
||||
}
|
||||
}
|
||||
|
||||
Example 3 (Array Input):
|
||||
{
|
||||
"name": "process_items",
|
||||
"description": "Processes a list of items with specific IDs.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"item_ids": {
|
||||
"type": "array",
|
||||
"description": "A list of unique item identifiers to process.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "An item ID"
|
||||
}
|
||||
},
|
||||
"processing_mode": {
|
||||
"type": "string",
|
||||
"description": "The mode for processing",
|
||||
"enum": ["fast", "thorough"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["item_ids", "processing_mode"]
|
||||
}
|
||||
}
|
||||
`,
|
||||
'custom-tool-schema': `You are an expert programmer specializing in creating OpenAI function calling format JSON schemas for custom tools.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON schema MUST follow this specific format:
|
||||
1. Top-level property "type" must be set to "function"
|
||||
2. A "function" object containing:
|
||||
- "name": A concise, camelCase name for the function
|
||||
- "description": A clear description of what the function does
|
||||
- "parameters": A JSON Schema object describing the function's parameters with:
|
||||
- "type": "object"
|
||||
- "properties": An object containing parameter definitions
|
||||
- "required": An array of required parameter names
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "getWeather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"required": ["location"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "addItemToOrder",
|
||||
"description": "Add one quantity of a food item to the order.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"itemName": {
|
||||
"type": "string",
|
||||
"description": "The name of the food item to add to order"
|
||||
},
|
||||
"quantity": {
|
||||
"type": "integer",
|
||||
"description": "The quantity of the item to add",
|
||||
"default": 1
|
||||
}
|
||||
},
|
||||
"required": ["itemName"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Example 3 (Array Input):
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "processItems",
|
||||
"description": "Processes a list of items with specific IDs.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"itemIds": {
|
||||
"type": "array",
|
||||
"description": "A list of unique item identifiers to process.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "An item ID"
|
||||
}
|
||||
},
|
||||
"processingMode": {
|
||||
"type": "string",
|
||||
"description": "The mode for processing",
|
||||
"enum": ["fast", "thorough"]
|
||||
}
|
||||
},
|
||||
"required": ["itemIds"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
`,
|
||||
'javascript-function-body': `You are an expert JavaScript programmer.
|
||||
Generate ONLY the raw body of a JavaScript function based on the user's request.
|
||||
The code should be executable within an 'async function(params, environmentVariables) {...}' context.
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
IMPORTANT FORMATTING RULES:
|
||||
1. Reference Environment Variables: Use the exact syntax {{VARIABLE_NAME}}. Do NOT wrap it in quotes (e.g., use 'apiKey = {{SERVICE_API_KEY}}' not 'apiKey = "{{SERVICE_API_KEY}}"'). Our system replaces these placeholders before execution.
|
||||
2. Reference Input Parameters/Workflow Variables: Use the exact syntax <variable_name>. Do NOT wrap it in quotes (e.g., use 'userId = <userId>;' not 'userId = "<userId>";'). This includes parameters defined in the block's schema and outputs from previous blocks.
|
||||
3. Function Body ONLY: Do NOT include the function signature (e.g., 'async function myFunction() {' or the surrounding '}').
|
||||
4. Imports: Do NOT include import/require statements unless they are standard Node.js built-in modules (e.g., 'crypto', 'fs'). External libraries are not supported in this context.
|
||||
5. Output: Ensure the code returns a value if the function is expected to produce output. Use 'return'.
|
||||
6. Clarity: Write clean, readable code.
|
||||
7. No Explanations: Do NOT include markdown formatting, comments explaining the rules, or any text other than the raw JavaScript code for the function body.
|
||||
|
||||
Example Scenario:
|
||||
User Prompt: "Fetch user data from an API. Use the User ID passed in as 'userId' and an API Key stored as the 'SERVICE_API_KEY' environment variable."
|
||||
|
||||
Generated Code:
|
||||
const userId = <block.content>; // Correct: Accessing input parameter without quotes
|
||||
const apiKey = {{SERVICE_API_KEY}}; // Correct: Accessing environment variable without quotes
|
||||
const url = \`https://api.example.com/users/\${userId}\`;
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Authorization': \`Bearer \${apiKey}\`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Throwing an error will mark the block execution as failed
|
||||
throw new Error(\`API request failed with status \${response.status}: \${await response.text()}\`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
console.log('User data fetched successfully.'); // Optional: logging for debugging
|
||||
return data; // Return the fetched data which becomes the block's output
|
||||
} catch (error) {
|
||||
console.error(\`Error fetching user data: \${error.message}\`);
|
||||
// Re-throwing the error ensures the workflow knows this step failed.
|
||||
throw error;
|
||||
}`,
|
||||
'typescript-function-body': `You are an expert TypeScript programmer.
|
||||
Generate ONLY the body of a TypeScript function based on the user's request.
|
||||
The code should be executable within an async context. You have access to a 'params' object (typed as Record<string, any>) containing input parameters and an 'environmentVariables' object (typed as Record<string, string>) for env vars.
|
||||
Do not include the function signature (e.g., 'async function myFunction(): Promise<any> {').
|
||||
Do not include import/require statements unless absolutely necessary and they are standard Node.js modules.
|
||||
Do not include markdown formatting or explanations.
|
||||
Output only the raw TypeScript code. Use modern TypeScript features where appropriate. Do not use semicolons.
|
||||
Example:
|
||||
const userId = <block.content> as string
|
||||
const apiKey = {{SERVICE_API_KEY}}
|
||||
const response = await fetch(\`https://api.example.com/users/\${userId}\`, { headers: { Authorization: \`Bearer \${apiKey}\` } })
|
||||
if (!response.ok) {
|
||||
throw new Error(\`Failed to fetch user data: \${response.statusText}\`)
|
||||
}
|
||||
const data: unknown = await response.json()
|
||||
// Add type checking/assertion if necessary
|
||||
return data // Ensure you return a value if expected`,
|
||||
|
||||
'json-object': `You are an expert JSON programmer.
|
||||
Generate ONLY the raw JSON object based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
You have access to the following variables you can use to generate the JSON body:
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Example:
|
||||
{
|
||||
"name": "<block.agent.response.content>",
|
||||
"age": <block.function.output.age>,
|
||||
"success": true
|
||||
}
|
||||
`,
|
||||
}
|
||||
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
logger.info(`[${requestId}] Received code generation request`)
|
||||
|
||||
if (!openai) {
|
||||
logger.error(`[${requestId}] OpenAI client not initialized. Missing API key.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Code generation service is not configured.' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
|
||||
try {
|
||||
const body = (await req.json()) as RequestBody
|
||||
noStore()
|
||||
|
||||
// Destructure history along with other fields
|
||||
const { prompt, generationType, context, stream = false, history = [] } = body
|
||||
|
||||
if (!prompt || !generationType) {
|
||||
logger.warn(`[${requestId}] Invalid request: Missing prompt or generationType.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Missing required fields: prompt and generationType.' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
if (!systemPrompts[generationType]) {
|
||||
logger.warn(`[${requestId}] Invalid generationType: ${generationType}`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: `Invalid generationType: ${generationType}` },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
const systemPrompt = systemPrompts[generationType]
|
||||
|
||||
// Construct the user message, potentially including context
|
||||
const currentUserMessageContent = context
|
||||
? `Prompt: ${prompt}\\n\\nExisting Content/Context:\\n${context}`
|
||||
: `${prompt}` // Keep it simple for follow-ups, context is in history
|
||||
|
||||
// Prepare messages for OpenAI API
|
||||
// Start with the system prompt
|
||||
const messages: ChatMessage[] = [{ role: 'system', content: systemPrompt }]
|
||||
|
||||
// Add previous messages from history
|
||||
// Filter out any potential system messages from history if we always prepend a fresh one
|
||||
messages.push(...history.filter((msg) => msg.role !== 'system'))
|
||||
|
||||
// Add the current user prompt
|
||||
messages.push({ role: 'user', content: currentUserMessageContent })
|
||||
|
||||
logger.debug(`[${requestId}] Calling OpenAI API`, {
|
||||
generationType,
|
||||
stream,
|
||||
historyLength: history.length,
|
||||
})
|
||||
|
||||
// For streaming responses
|
||||
if (stream) {
|
||||
try {
|
||||
const streamCompletion = await openai?.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: messages,
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
stream: true,
|
||||
})
|
||||
|
||||
// Use ReadableStream for Edge runtime
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
async start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
let fullContent = generationType === 'json-schema' ? '' : undefined
|
||||
|
||||
// Process each chunk
|
||||
for await (const chunk of streamCompletion) {
|
||||
const content = chunk.choices[0]?.delta?.content || ''
|
||||
if (content) {
|
||||
// Only append if fullContent is defined (i.e., for json-schema)
|
||||
if (fullContent !== undefined) {
|
||||
fullContent += content
|
||||
}
|
||||
|
||||
// Send the chunk to the client
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
chunk: content,
|
||||
done: false,
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Check JSON validity for json-schema type when streaming is complete
|
||||
if (generationType === 'json-schema' && fullContent) {
|
||||
try {
|
||||
JSON.parse(fullContent)
|
||||
} catch (parseError: any) {
|
||||
logger.error(`[${requestId}] Generated JSON schema is invalid`, {
|
||||
error: parseError.message,
|
||||
content: fullContent,
|
||||
})
|
||||
|
||||
// Send error to client
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
error: 'Generated JSON schema was invalid.',
|
||||
done: true,
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
controller.close()
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Send the final done message
|
||||
controller.enqueue(
|
||||
encoder.encode(
|
||||
`${JSON.stringify({
|
||||
done: true,
|
||||
...(fullContent !== undefined && { fullContent: fullContent }),
|
||||
})}\n`
|
||||
)
|
||||
)
|
||||
controller.close()
|
||||
logger.info(`[${requestId}] Code generation streaming completed`, { generationType })
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'text/event-stream',
|
||||
'Cache-Control': 'no-cache, no-transform',
|
||||
Connection: 'keep-alive',
|
||||
},
|
||||
}
|
||||
)
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Streaming error`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'An error occurred during code generation streaming.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// For non-streaming responses (original implementation)
|
||||
const completion = await openai?.chat.completions.create({
|
||||
// Use non-null assertion
|
||||
model: 'gpt-4o',
|
||||
// Pass the constructed messages array
|
||||
messages: messages,
|
||||
temperature: 0.2,
|
||||
max_tokens: 1500,
|
||||
response_format: generationType === 'json-schema' ? { type: 'json_object' } : undefined,
|
||||
})
|
||||
|
||||
const generatedContent = completion.choices[0]?.message?.content?.trim()
|
||||
|
||||
if (!generatedContent) {
|
||||
logger.error(`[${requestId}] OpenAI response was empty or invalid.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Failed to generate content. OpenAI response was empty.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Code generation successful`, { generationType })
|
||||
|
||||
if (generationType === 'json-schema') {
|
||||
try {
|
||||
JSON.parse(generatedContent)
|
||||
return NextResponse.json({ success: true, generatedContent })
|
||||
} catch (parseError: any) {
|
||||
logger.error(`[${requestId}] Generated JSON schema is invalid`, {
|
||||
error: parseError.message,
|
||||
content: generatedContent,
|
||||
})
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Generated JSON schema was invalid.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
} else {
|
||||
return NextResponse.json({ success: true, generatedContent })
|
||||
}
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Code generation failed`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
let clientErrorMessage = 'Code generation failed. Please try again later.'
|
||||
// Keep original message for server logging
|
||||
let serverErrorMessage = error.message || 'Unknown error'
|
||||
|
||||
let status = 500
|
||||
if (error instanceof OpenAI.APIError) {
|
||||
status = error.status || 500
|
||||
serverErrorMessage = error.message // Use specific API error for server logs
|
||||
logger.error(`[${requestId}] OpenAI API Error: ${status} - ${serverErrorMessage}`)
|
||||
// Optionally, customize client message based on status, but keep it generic
|
||||
if (status === 401) {
|
||||
clientErrorMessage = 'Authentication failed. Please check your API key configuration.'
|
||||
} else if (status === 429) {
|
||||
clientErrorMessage = 'Rate limit exceeded. Please try again later.'
|
||||
} else if (status >= 500) {
|
||||
clientErrorMessage =
|
||||
'The code generation service is currently unavailable. Please try again later.'
|
||||
}
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: clientErrorMessage,
|
||||
},
|
||||
{ status }
|
||||
)
|
||||
}
|
||||
}
|
||||
194
apps/sim/app/api/wand-generate/route.ts
Normal file
194
apps/sim/app/api/wand-generate/route.ts
Normal file
@@ -0,0 +1,194 @@
|
||||
import { unstable_noStore as noStore } from 'next/cache'
|
||||
import { type NextRequest, NextResponse } from 'next/server'
|
||||
import OpenAI from 'openai'
|
||||
import { env } from '@/lib/env'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
export const dynamic = 'force-dynamic'
|
||||
export const runtime = 'edge'
|
||||
export const maxDuration = 60
|
||||
|
||||
const logger = createLogger('WandGenerateAPI')
|
||||
|
||||
const openai = env.OPENAI_API_KEY
|
||||
? new OpenAI({
|
||||
apiKey: env.OPENAI_API_KEY,
|
||||
})
|
||||
: null
|
||||
|
||||
if (!env.OPENAI_API_KEY) {
|
||||
logger.warn('OPENAI_API_KEY not found. Wand generation API will not function.')
|
||||
}
|
||||
|
||||
interface ChatMessage {
|
||||
role: 'user' | 'assistant' | 'system'
|
||||
content: string
|
||||
}
|
||||
|
||||
interface RequestBody {
|
||||
prompt: string
|
||||
systemPrompt?: string
|
||||
stream?: boolean
|
||||
history?: ChatMessage[]
|
||||
}
|
||||
|
||||
// The endpoint is now generic - system prompts come from wand configs
|
||||
|
||||
export async function POST(req: NextRequest) {
|
||||
const requestId = crypto.randomUUID().slice(0, 8)
|
||||
logger.info(`[${requestId}] Received wand generation request`)
|
||||
|
||||
if (!openai) {
|
||||
logger.error(`[${requestId}] OpenAI client not initialized. Missing API key.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Wand generation service is not configured.' },
|
||||
{ status: 503 }
|
||||
)
|
||||
}
|
||||
|
||||
try {
|
||||
noStore()
|
||||
const body = (await req.json()) as RequestBody
|
||||
|
||||
const { prompt, systemPrompt, stream = false, history = [] } = body
|
||||
|
||||
if (!prompt) {
|
||||
logger.warn(`[${requestId}] Invalid request: Missing prompt.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Missing required field: prompt.' },
|
||||
{ status: 400 }
|
||||
)
|
||||
}
|
||||
|
||||
// Use provided system prompt or default
|
||||
const finalSystemPrompt =
|
||||
systemPrompt ||
|
||||
'You are a helpful AI assistant. Generate content exactly as requested by the user.'
|
||||
|
||||
// Prepare messages for OpenAI API
|
||||
const messages: ChatMessage[] = [{ role: 'system', content: finalSystemPrompt }]
|
||||
|
||||
// Add previous messages from history
|
||||
messages.push(...history.filter((msg) => msg.role !== 'system'))
|
||||
|
||||
// Add the current user prompt
|
||||
messages.push({ role: 'user', content: prompt })
|
||||
|
||||
logger.debug(`[${requestId}] Calling OpenAI API for wand generation`, {
|
||||
stream,
|
||||
historyLength: history.length,
|
||||
})
|
||||
|
||||
// For streaming responses
|
||||
if (stream) {
|
||||
try {
|
||||
const streamCompletion = await openai?.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: messages,
|
||||
temperature: 0.3,
|
||||
max_tokens: 10000,
|
||||
stream: true,
|
||||
})
|
||||
|
||||
return new Response(
|
||||
new ReadableStream({
|
||||
async start(controller) {
|
||||
const encoder = new TextEncoder()
|
||||
|
||||
try {
|
||||
for await (const chunk of streamCompletion) {
|
||||
const content = chunk.choices[0]?.delta?.content || ''
|
||||
if (content) {
|
||||
// Use the same format as codegen API for consistency
|
||||
controller.enqueue(
|
||||
encoder.encode(`${JSON.stringify({ chunk: content, done: false })}\n`)
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// Send completion signal
|
||||
controller.enqueue(encoder.encode(`${JSON.stringify({ chunk: '', done: true })}\n`))
|
||||
controller.close()
|
||||
logger.info(`[${requestId}] Wand generation streaming completed`)
|
||||
} catch (streamError: any) {
|
||||
logger.error(`[${requestId}] Streaming error`, { error: streamError.message })
|
||||
controller.enqueue(
|
||||
encoder.encode(`${JSON.stringify({ error: 'Streaming failed', done: true })}\n`)
|
||||
)
|
||||
controller.close()
|
||||
}
|
||||
},
|
||||
}),
|
||||
{
|
||||
headers: {
|
||||
'Content-Type': 'text/plain',
|
||||
'Cache-Control': 'no-cache, no-transform',
|
||||
Connection: 'keep-alive',
|
||||
},
|
||||
}
|
||||
)
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Streaming error`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'An error occurred during wand generation streaming.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
// For non-streaming responses
|
||||
const completion = await openai?.chat.completions.create({
|
||||
model: 'gpt-4o',
|
||||
messages: messages,
|
||||
temperature: 0.3,
|
||||
max_tokens: 10000,
|
||||
})
|
||||
|
||||
const generatedContent = completion.choices[0]?.message?.content?.trim()
|
||||
|
||||
if (!generatedContent) {
|
||||
logger.error(`[${requestId}] OpenAI response was empty or invalid.`)
|
||||
return NextResponse.json(
|
||||
{ success: false, error: 'Failed to generate content. OpenAI response was empty.' },
|
||||
{ status: 500 }
|
||||
)
|
||||
}
|
||||
|
||||
logger.info(`[${requestId}] Wand generation successful`)
|
||||
return NextResponse.json({ success: true, content: generatedContent })
|
||||
} catch (error: any) {
|
||||
logger.error(`[${requestId}] Wand generation failed`, {
|
||||
error: error.message || 'Unknown error',
|
||||
stack: error.stack,
|
||||
})
|
||||
|
||||
let clientErrorMessage = 'Wand generation failed. Please try again later.'
|
||||
let status = 500
|
||||
|
||||
if (error instanceof OpenAI.APIError) {
|
||||
status = error.status || 500
|
||||
logger.error(`[${requestId}] OpenAI API Error: ${status} - ${error.message}`)
|
||||
|
||||
if (status === 401) {
|
||||
clientErrorMessage = 'Authentication failed. Please check your API key configuration.'
|
||||
} else if (status === 429) {
|
||||
clientErrorMessage = 'Rate limit exceeded. Please try again later.'
|
||||
} else if (status >= 500) {
|
||||
clientErrorMessage =
|
||||
'The wand generation service is currently unavailable. Please try again later.'
|
||||
}
|
||||
}
|
||||
|
||||
return NextResponse.json(
|
||||
{
|
||||
success: false,
|
||||
error: clientErrorMessage,
|
||||
},
|
||||
{ status }
|
||||
)
|
||||
}
|
||||
}
|
||||
@@ -1,10 +1,10 @@
|
||||
export { CodePromptBar } from './code-prompt-bar/code-prompt-bar'
|
||||
export { ControlBar } from './control-bar/control-bar'
|
||||
export { ErrorBoundary } from './error/index'
|
||||
export { LoopNodeComponent } from './loop-node/loop-node'
|
||||
export { Panel } from './panel/panel'
|
||||
export { ParallelNodeComponent } from './parallel-node/parallel-node'
|
||||
export { SkeletonLoading } from './skeleton-loading/skeleton-loading'
|
||||
export { WandPromptBar } from './wand-prompt-bar/wand-prompt-bar'
|
||||
export { WorkflowBlock } from './workflow-block/workflow-block'
|
||||
export { WorkflowEdge } from './workflow-edge/workflow-edge'
|
||||
export { WorkflowTextEditorModal } from './workflow-text-editor/workflow-text-editor-modal'
|
||||
|
||||
@@ -4,7 +4,7 @@ import { Button } from '@/components/ui/button'
|
||||
import { Input } from '@/components/ui/input'
|
||||
import { cn } from '@/lib/utils'
|
||||
|
||||
interface CodePromptBarProps {
|
||||
interface WandPromptBarProps {
|
||||
isVisible: boolean
|
||||
isLoading: boolean
|
||||
isStreaming: boolean
|
||||
@@ -16,7 +16,7 @@ interface CodePromptBarProps {
|
||||
className?: string
|
||||
}
|
||||
|
||||
export function CodePromptBar({
|
||||
export function WandPromptBar({
|
||||
isVisible,
|
||||
isLoading,
|
||||
isStreaming,
|
||||
@@ -24,9 +24,9 @@ export function CodePromptBar({
|
||||
onSubmit,
|
||||
onCancel,
|
||||
onChange,
|
||||
placeholder = 'Describe the JavaScript code to generate...',
|
||||
placeholder = 'Describe what you want to generate...',
|
||||
className,
|
||||
}: CodePromptBarProps) {
|
||||
}: WandPromptBarProps) {
|
||||
const promptBarRef = useRef<HTMLDivElement>(null)
|
||||
const [isExiting, setIsExiting] = useState(false)
|
||||
|
||||
@@ -10,9 +10,10 @@ import { checkEnvVarTrigger, EnvVarDropdown } from '@/components/ui/env-var-drop
|
||||
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { CodePromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/code-prompt-bar/code-prompt-bar'
|
||||
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
|
||||
import { useCodeGeneration } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-code-generation'
|
||||
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
|
||||
import type { GenerationType } from '@/blocks/types'
|
||||
import { useSubBlockStore } from '@/stores/workflows/subblock/store'
|
||||
|
||||
const logger = createLogger('Code')
|
||||
@@ -23,12 +24,19 @@ interface CodeProps {
|
||||
isConnecting: boolean
|
||||
placeholder?: string
|
||||
language?: 'javascript' | 'json'
|
||||
generationType?: 'javascript-function-body' | 'json-schema' | 'json-object'
|
||||
generationType?: GenerationType
|
||||
value?: string
|
||||
isPreview?: boolean
|
||||
previewValue?: string | null
|
||||
disabled?: boolean
|
||||
onValidationChange?: (isValid: boolean) => void
|
||||
wandConfig: {
|
||||
enabled: boolean
|
||||
prompt: string
|
||||
generationType?: GenerationType
|
||||
placeholder?: string
|
||||
maintainHistory?: boolean
|
||||
}
|
||||
}
|
||||
|
||||
if (typeof document !== 'undefined') {
|
||||
@@ -61,6 +69,7 @@ export function Code({
|
||||
previewValue,
|
||||
disabled = false,
|
||||
onValidationChange,
|
||||
wandConfig,
|
||||
}: CodeProps) {
|
||||
// Determine the AI prompt placeholder based on language
|
||||
const aiPromptPlaceholder = useMemo(() => {
|
||||
@@ -124,25 +133,27 @@ export function Code({
|
||||
const handleGeneratedContentRef = useRef<(generatedCode: string) => void>(() => {})
|
||||
const handleStreamChunkRef = useRef<(chunk: string) => void>(() => {})
|
||||
|
||||
// AI Code Generation Hook
|
||||
const {
|
||||
isLoading: isAiLoading,
|
||||
isStreaming: isAiStreaming,
|
||||
generate: generateCode,
|
||||
generateStream: generateCodeStream,
|
||||
cancelGeneration,
|
||||
isPromptVisible,
|
||||
showPromptInline,
|
||||
hidePromptInline,
|
||||
promptInputValue,
|
||||
updatePromptValue,
|
||||
} = useCodeGeneration({
|
||||
generationType: generationType,
|
||||
initialContext: code,
|
||||
onGeneratedContent: (content: string) => handleGeneratedContentRef.current?.(content),
|
||||
onStreamChunk: (chunk: string) => handleStreamChunkRef.current?.(chunk),
|
||||
onStreamStart: () => handleStreamStartRef.current?.(),
|
||||
})
|
||||
// AI Code Generation Hook - use new wand system
|
||||
const wandHook = wandConfig?.enabled
|
||||
? useWand({
|
||||
wandConfig,
|
||||
currentValue: code,
|
||||
onStreamStart: () => handleStreamStartRef.current?.(),
|
||||
onStreamChunk: (chunk: string) => handleStreamChunkRef.current?.(chunk),
|
||||
onGeneratedContent: (content: string) => handleGeneratedContentRef.current?.(content),
|
||||
})
|
||||
: null
|
||||
|
||||
// Extract values from wand hook
|
||||
const isAiLoading = wandHook?.isLoading || false
|
||||
const isAiStreaming = wandHook?.isStreaming || false
|
||||
const generateCodeStream = wandHook?.generateStream || (() => {})
|
||||
const isPromptVisible = wandHook?.isPromptVisible || false
|
||||
const showPromptInline = wandHook?.showPromptInline || (() => {})
|
||||
const hidePromptInline = wandHook?.hidePromptInline || (() => {})
|
||||
const promptInputValue = wandHook?.promptInputValue || ''
|
||||
const updatePromptValue = wandHook?.updatePromptValue || (() => {})
|
||||
const cancelGeneration = wandHook?.cancelGeneration || (() => {})
|
||||
|
||||
// State management - useSubBlockValue with explicit streaming control
|
||||
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId, false, {
|
||||
@@ -156,30 +167,32 @@ export function Code({
|
||||
// Use preview value when in preview mode, otherwise use store value or prop value
|
||||
const value = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
|
||||
|
||||
// Define the handlers now that we have access to setStoreValue
|
||||
handleStreamStartRef.current = () => {
|
||||
setCode('')
|
||||
// Streaming state is now controlled by isAiStreaming
|
||||
}
|
||||
|
||||
handleGeneratedContentRef.current = (generatedCode: string) => {
|
||||
setCode(generatedCode)
|
||||
if (!isPreview && !disabled) {
|
||||
setStoreValue(generatedCode)
|
||||
// Final value will be persisted when isAiStreaming becomes false
|
||||
// Define the handlers in useEffect to avoid setState during render
|
||||
useEffect(() => {
|
||||
handleStreamStartRef.current = () => {
|
||||
setCode('')
|
||||
// Streaming state is now controlled by isAiStreaming
|
||||
}
|
||||
}
|
||||
|
||||
handleStreamChunkRef.current = (chunk: string) => {
|
||||
setCode((currentCode) => {
|
||||
const newCode = currentCode + chunk
|
||||
handleGeneratedContentRef.current = (generatedCode: string) => {
|
||||
setCode(generatedCode)
|
||||
if (!isPreview && !disabled) {
|
||||
// Update the value - it won't be persisted until streaming ends
|
||||
setStoreValue(newCode)
|
||||
setStoreValue(generatedCode)
|
||||
// Final value will be persisted when isAiStreaming becomes false
|
||||
}
|
||||
return newCode
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
handleStreamChunkRef.current = (chunk: string) => {
|
||||
setCode((currentCode) => {
|
||||
const newCode = currentCode + chunk
|
||||
if (!isPreview && !disabled) {
|
||||
// Update the value - it won't be persisted until streaming ends
|
||||
setStoreValue(newCode)
|
||||
}
|
||||
return newCode
|
||||
})
|
||||
}
|
||||
}, [isPreview, disabled, setStoreValue])
|
||||
|
||||
// Effects
|
||||
useEffect(() => {
|
||||
@@ -352,15 +365,15 @@ export function Code({
|
||||
|
||||
return (
|
||||
<>
|
||||
<CodePromptBar
|
||||
<WandPromptBar
|
||||
isVisible={isPromptVisible}
|
||||
isLoading={isAiLoading}
|
||||
isStreaming={isAiStreaming}
|
||||
promptValue={promptInputValue}
|
||||
onSubmit={(prompt: string) => generateCodeStream({ prompt, context: code })}
|
||||
onSubmit={(prompt: string) => generateCodeStream({ prompt })}
|
||||
onCancel={isAiStreaming ? cancelGeneration : hidePromptInline}
|
||||
onChange={updatePromptValue}
|
||||
placeholder={aiPromptPlaceholder}
|
||||
placeholder={wandConfig?.placeholder || aiPromptPlaceholder}
|
||||
/>
|
||||
|
||||
<div
|
||||
|
||||
@@ -1,13 +1,16 @@
|
||||
import { useEffect, useLayoutEffect, useRef, useState } from 'react'
|
||||
import { ChevronsUpDown } from 'lucide-react'
|
||||
import { ChevronsUpDown, Wand2 } from 'lucide-react'
|
||||
import { useReactFlow } from 'reactflow'
|
||||
import { Button } from '@/components/ui/button'
|
||||
import { checkEnvVarTrigger, EnvVarDropdown } from '@/components/ui/env-var-dropdown'
|
||||
import { formatDisplayText } from '@/components/ui/formatted-text'
|
||||
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
|
||||
import { Textarea } from '@/components/ui/textarea'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
|
||||
import { useSubBlockValue } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/hooks/use-sub-block-value'
|
||||
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
|
||||
import type { SubBlockConfig } from '@/blocks/types'
|
||||
|
||||
const logger = createLogger('LongInput')
|
||||
@@ -44,7 +47,38 @@ export function LongInput({
|
||||
onChange,
|
||||
disabled,
|
||||
}: LongInputProps) {
|
||||
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId)
|
||||
// Local state for immediate UI updates during streaming
|
||||
const [localContent, setLocalContent] = useState<string>('')
|
||||
|
||||
// Wand functionality (only if wandConfig is enabled) - define early to get streaming state
|
||||
const wandHook = config.wandConfig?.enabled
|
||||
? useWand({
|
||||
wandConfig: config.wandConfig,
|
||||
currentValue: localContent,
|
||||
onStreamStart: () => {
|
||||
// Clear the content when streaming starts
|
||||
setLocalContent('')
|
||||
},
|
||||
onStreamChunk: (chunk) => {
|
||||
// Update local content with each chunk as it arrives
|
||||
setLocalContent((current) => current + chunk)
|
||||
},
|
||||
onGeneratedContent: (content) => {
|
||||
// Final content update (fallback)
|
||||
setLocalContent(content)
|
||||
},
|
||||
})
|
||||
: null
|
||||
|
||||
// State management - useSubBlockValue with explicit streaming control
|
||||
const [storeValue, setStoreValue] = useSubBlockValue(blockId, subBlockId, false, {
|
||||
debounceMs: 150,
|
||||
isStreaming: wandHook?.isStreaming || false, // Use wand streaming state
|
||||
onStreamingEnd: () => {
|
||||
logger.debug('Wand streaming ended, value persisted', { blockId, subBlockId })
|
||||
},
|
||||
})
|
||||
|
||||
const [showEnvVars, setShowEnvVars] = useState(false)
|
||||
const [showTags, setShowTags] = useState(false)
|
||||
const [searchTerm, setSearchTerm] = useState('')
|
||||
@@ -55,7 +89,29 @@ export function LongInput({
|
||||
const containerRef = useRef<HTMLDivElement>(null)
|
||||
|
||||
// Use preview value when in preview mode, otherwise use store value or prop value
|
||||
const value = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
|
||||
const baseValue = isPreview ? previewValue : propValue !== undefined ? propValue : storeValue
|
||||
|
||||
// During streaming, use local content; otherwise use base value
|
||||
const value = wandHook?.isStreaming ? localContent : baseValue
|
||||
|
||||
// Sync local content with base value when not streaming
|
||||
useEffect(() => {
|
||||
if (!wandHook?.isStreaming) {
|
||||
const baseValueString = baseValue?.toString() ?? ''
|
||||
if (baseValueString !== localContent) {
|
||||
setLocalContent(baseValueString)
|
||||
}
|
||||
}
|
||||
}, [baseValue, wandHook?.isStreaming]) // Removed localContent to prevent infinite loop
|
||||
|
||||
// Update store value during streaming (but won't persist until streaming ends)
|
||||
useEffect(() => {
|
||||
if (wandHook?.isStreaming && localContent !== '') {
|
||||
if (!isPreview && !disabled) {
|
||||
setStoreValue(localContent)
|
||||
}
|
||||
}
|
||||
}, [localContent, wandHook?.isStreaming, isPreview, disabled, setStoreValue])
|
||||
|
||||
// Calculate initial height based on rows prop with reasonable defaults
|
||||
const getInitialHeight = () => {
|
||||
@@ -83,12 +139,15 @@ export function LongInput({
|
||||
|
||||
// Handle input changes
|
||||
const handleChange = (e: React.ChangeEvent<HTMLTextAreaElement>) => {
|
||||
// Don't allow changes if disabled
|
||||
if (disabled) return
|
||||
// Don't allow changes if disabled or streaming
|
||||
if (disabled || wandHook?.isStreaming) return
|
||||
|
||||
const newValue = e.target.value
|
||||
const newCursorPosition = e.target.selectionStart ?? 0
|
||||
|
||||
// Update local content immediately
|
||||
setLocalContent(newValue)
|
||||
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
@@ -190,7 +249,12 @@ export function LongInput({
|
||||
|
||||
// Update all state in a single batch
|
||||
Promise.resolve().then(() => {
|
||||
if (!isPreview) {
|
||||
// Update local content immediately
|
||||
setLocalContent(newValue)
|
||||
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
setStoreValue(newValue)
|
||||
}
|
||||
setCursorPosition(dropPosition + 1)
|
||||
@@ -270,96 +334,130 @@ export function LongInput({
|
||||
}
|
||||
|
||||
return (
|
||||
<div ref={containerRef} className='relative w-full' style={{ height: `${height}px` }}>
|
||||
<Textarea
|
||||
ref={textareaRef}
|
||||
className={cn(
|
||||
'allow-scroll min-h-full w-full resize-none text-transparent caret-foreground placeholder:text-muted-foreground/50',
|
||||
isConnecting &&
|
||||
config?.connectionDroppable !== false &&
|
||||
'ring-2 ring-blue-500 ring-offset-2 focus-visible:ring-blue-500'
|
||||
<>
|
||||
{/* Wand Prompt Bar - positioned above the textarea */}
|
||||
{wandHook && (
|
||||
<WandPromptBar
|
||||
isVisible={wandHook.isPromptVisible}
|
||||
isLoading={wandHook.isLoading}
|
||||
isStreaming={wandHook.isStreaming}
|
||||
promptValue={wandHook.promptInputValue}
|
||||
onSubmit={(prompt: string) => wandHook.generateStream({ prompt })}
|
||||
onCancel={wandHook.isStreaming ? wandHook.cancelGeneration : wandHook.hidePromptInline}
|
||||
onChange={wandHook.updatePromptValue}
|
||||
placeholder={config.wandConfig?.placeholder || 'Describe what you want to generate...'}
|
||||
/>
|
||||
)}
|
||||
|
||||
<div ref={containerRef} className='group relative w-full' style={{ height: `${height}px` }}>
|
||||
<Textarea
|
||||
ref={textareaRef}
|
||||
className={cn(
|
||||
'allow-scroll min-h-full w-full resize-none text-transparent caret-foreground placeholder:text-muted-foreground/50',
|
||||
isConnecting &&
|
||||
config?.connectionDroppable !== false &&
|
||||
'ring-2 ring-blue-500 ring-offset-2 focus-visible:ring-blue-500'
|
||||
)}
|
||||
rows={rows ?? DEFAULT_ROWS}
|
||||
placeholder={placeholder ?? ''}
|
||||
value={value?.toString() ?? ''}
|
||||
onChange={handleChange}
|
||||
onDrop={handleDrop}
|
||||
onDragOver={handleDragOver}
|
||||
onScroll={handleScroll}
|
||||
onWheel={handleWheel}
|
||||
onKeyDown={handleKeyDown}
|
||||
onFocus={() => {
|
||||
setShowEnvVars(false)
|
||||
setShowTags(false)
|
||||
setSearchTerm('')
|
||||
}}
|
||||
disabled={isPreview || disabled}
|
||||
style={{
|
||||
fontFamily: 'inherit',
|
||||
lineHeight: 'inherit',
|
||||
height: `${height}px`,
|
||||
}}
|
||||
/>
|
||||
<div
|
||||
ref={overlayRef}
|
||||
className='pointer-events-none absolute inset-0 whitespace-pre-wrap break-words bg-transparent px-3 py-2 text-sm'
|
||||
style={{
|
||||
fontFamily: 'inherit',
|
||||
lineHeight: 'inherit',
|
||||
width: textareaRef.current ? `${textareaRef.current.clientWidth}px` : '100%',
|
||||
height: `${height}px`,
|
||||
overflow: 'hidden',
|
||||
}}
|
||||
>
|
||||
{formatDisplayText(value?.toString() ?? '', true)}
|
||||
</div>
|
||||
|
||||
{/* Wand Button */}
|
||||
{wandHook && !isPreview && (
|
||||
<div className='absolute top-2 right-3 z-10 flex items-center gap-1 opacity-0 transition-opacity group-hover:opacity-100'>
|
||||
<Button
|
||||
variant='ghost'
|
||||
size='icon'
|
||||
onClick={
|
||||
wandHook.isPromptVisible ? wandHook.hidePromptInline : wandHook.showPromptInline
|
||||
}
|
||||
disabled={wandHook.isLoading || wandHook.isStreaming || disabled}
|
||||
aria-label='Generate content with AI'
|
||||
className='h-8 w-8 rounded-full border border-transparent bg-muted/80 text-muted-foreground shadow-sm transition-all duration-200 hover:border-primary/20 hover:bg-muted hover:text-primary hover:shadow'
|
||||
>
|
||||
<Wand2 className='h-4 w-4' />
|
||||
</Button>
|
||||
</div>
|
||||
)}
|
||||
rows={rows ?? DEFAULT_ROWS}
|
||||
placeholder={placeholder ?? ''}
|
||||
value={value?.toString() ?? ''}
|
||||
onChange={handleChange}
|
||||
onDrop={handleDrop}
|
||||
onDragOver={handleDragOver}
|
||||
onScroll={handleScroll}
|
||||
onWheel={handleWheel}
|
||||
onKeyDown={handleKeyDown}
|
||||
onFocus={() => {
|
||||
setShowEnvVars(false)
|
||||
setShowTags(false)
|
||||
setSearchTerm('')
|
||||
}}
|
||||
disabled={isPreview || disabled}
|
||||
style={{
|
||||
fontFamily: 'inherit',
|
||||
lineHeight: 'inherit',
|
||||
height: `${height}px`,
|
||||
}}
|
||||
/>
|
||||
<div
|
||||
ref={overlayRef}
|
||||
className='pointer-events-none absolute inset-0 whitespace-pre-wrap break-words bg-transparent px-3 py-2 text-sm'
|
||||
style={{
|
||||
fontFamily: 'inherit',
|
||||
lineHeight: 'inherit',
|
||||
width: textareaRef.current ? `${textareaRef.current.clientWidth}px` : '100%',
|
||||
height: `${height}px`,
|
||||
overflow: 'hidden',
|
||||
}}
|
||||
>
|
||||
{formatDisplayText(value?.toString() ?? '', true)}
|
||||
</div>
|
||||
|
||||
{/* Custom resize handle */}
|
||||
<div
|
||||
className='absolute right-1 bottom-1 flex h-4 w-4 cursor-s-resize items-center justify-center rounded-sm bg-background'
|
||||
onMouseDown={startResize}
|
||||
onDragStart={(e) => {
|
||||
e.preventDefault()
|
||||
}}
|
||||
>
|
||||
<ChevronsUpDown className='h-3 w-3 text-muted-foreground/70' />
|
||||
</div>
|
||||
{/* Custom resize handle */}
|
||||
<div
|
||||
className='absolute right-1 bottom-1 flex h-4 w-4 cursor-s-resize items-center justify-center rounded-sm bg-background'
|
||||
onMouseDown={startResize}
|
||||
onDragStart={(e) => {
|
||||
e.preventDefault()
|
||||
}}
|
||||
>
|
||||
<ChevronsUpDown className='h-3 w-3 text-muted-foreground/70' />
|
||||
</div>
|
||||
|
||||
<EnvVarDropdown
|
||||
visible={showEnvVars}
|
||||
onSelect={(newValue) => {
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
setStoreValue(newValue)
|
||||
}
|
||||
}}
|
||||
searchTerm={searchTerm}
|
||||
inputValue={value?.toString() ?? ''}
|
||||
cursorPosition={cursorPosition}
|
||||
onClose={() => {
|
||||
setShowEnvVars(false)
|
||||
setSearchTerm('')
|
||||
}}
|
||||
/>
|
||||
<TagDropdown
|
||||
visible={showTags}
|
||||
onSelect={(newValue) => {
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
setStoreValue(newValue)
|
||||
}
|
||||
}}
|
||||
blockId={blockId}
|
||||
activeSourceBlockId={activeSourceBlockId}
|
||||
inputValue={value?.toString() ?? ''}
|
||||
cursorPosition={cursorPosition}
|
||||
onClose={() => {
|
||||
setShowTags(false)
|
||||
setActiveSourceBlockId(null)
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
<EnvVarDropdown
|
||||
visible={showEnvVars}
|
||||
onSelect={(newValue) => {
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
setStoreValue(newValue)
|
||||
}
|
||||
}}
|
||||
searchTerm={searchTerm}
|
||||
inputValue={value?.toString() ?? ''}
|
||||
cursorPosition={cursorPosition}
|
||||
onClose={() => {
|
||||
setShowEnvVars(false)
|
||||
setSearchTerm('')
|
||||
}}
|
||||
/>
|
||||
<TagDropdown
|
||||
visible={showTags}
|
||||
onSelect={(newValue) => {
|
||||
if (onChange) {
|
||||
onChange(newValue)
|
||||
} else if (!isPreview) {
|
||||
setStoreValue(newValue)
|
||||
}
|
||||
}}
|
||||
blockId={blockId}
|
||||
activeSourceBlockId={activeSourceBlockId}
|
||||
inputValue={value?.toString() ?? ''}
|
||||
cursorPosition={cursorPosition}
|
||||
onClose={() => {
|
||||
setShowTags(false)
|
||||
setActiveSourceBlockId(null)
|
||||
}}
|
||||
/>
|
||||
</div>
|
||||
</>
|
||||
)
|
||||
}
|
||||
|
||||
@@ -352,6 +352,7 @@ export function ShortInput({
|
||||
: formatDisplayText(value?.toString() ?? '', true)}
|
||||
</div>
|
||||
</div>
|
||||
|
||||
<EnvVarDropdown
|
||||
visible={showEnvVars}
|
||||
onSelect={handleEnvVarSelect}
|
||||
|
||||
@@ -24,9 +24,9 @@ import { Label } from '@/components/ui/label'
|
||||
import { checkTagTrigger, TagDropdown } from '@/components/ui/tag-dropdown'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import { cn } from '@/lib/utils'
|
||||
import { CodePromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/code-prompt-bar/code-prompt-bar'
|
||||
import { WandPromptBar } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/wand-prompt-bar/wand-prompt-bar'
|
||||
import { CodeEditor } from '@/app/workspace/[workspaceId]/w/[workflowId]/components/workflow-block/components/sub-block/components/tool-input/components/code-editor/code-editor'
|
||||
import { useCodeGeneration } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-code-generation'
|
||||
import { useWand } from '@/app/workspace/[workspaceId]/w/[workflowId]/hooks/use-wand'
|
||||
import { useCustomToolsStore } from '@/stores/custom-tools/store'
|
||||
|
||||
const logger = createLogger('CustomToolModal')
|
||||
@@ -73,8 +73,82 @@ export function CustomToolModal({
|
||||
const [showDeleteConfirm, setShowDeleteConfirm] = useState(false)
|
||||
|
||||
// AI Code Generation Hooks
|
||||
const schemaGeneration = useCodeGeneration({
|
||||
generationType: 'custom-tool-schema',
|
||||
const schemaGeneration = useWand({
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert programmer specializing in creating OpenAI function calling format JSON schemas for custom tools.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON schema MUST follow this specific format:
|
||||
1. Top-level property "type" must be set to "function"
|
||||
2. A "function" object containing:
|
||||
- "name": A concise, camelCase name for the function
|
||||
- "description": A clear description of what the function does
|
||||
- "parameters": A JSON Schema object describing the function's parameters with:
|
||||
- "type": "object"
|
||||
- "properties": An object containing parameter definitions
|
||||
- "required": An array of required parameter names
|
||||
|
||||
Current schema: {context}
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "getWeather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"required": ["location"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"type": "function",
|
||||
"function": {
|
||||
"name": "addItemToOrder",
|
||||
"description": "Add one quantity of a food item to the order.",
|
||||
"parameters": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"itemName": {
|
||||
"type": "string",
|
||||
"description": "The name of the food item to add to order"
|
||||
},
|
||||
"quantity": {
|
||||
"type": "integer",
|
||||
"description": "The quantity of the item to add",
|
||||
"default": 1
|
||||
}
|
||||
},
|
||||
"required": ["itemName"],
|
||||
"additionalProperties": false
|
||||
}
|
||||
}
|
||||
}`,
|
||||
placeholder: 'Describe the function parameters and structure...',
|
||||
generationType: 'custom-tool-schema',
|
||||
},
|
||||
currentValue: jsonSchema,
|
||||
onGeneratedContent: (content) => {
|
||||
handleJsonSchemaChange(content)
|
||||
setSchemaError(null) // Clear error on successful generation
|
||||
@@ -89,8 +163,61 @@ export function CustomToolModal({
|
||||
},
|
||||
})
|
||||
|
||||
const codeGeneration = useCodeGeneration({
|
||||
generationType: 'javascript-function-body',
|
||||
const codeGeneration = useWand({
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert JavaScript programmer.
|
||||
Generate ONLY the raw body of a JavaScript function based on the user's request.
|
||||
The code should be executable within an 'async function(params, environmentVariables) {...}' context.
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Current code: {context}
|
||||
|
||||
IMPORTANT FORMATTING RULES:
|
||||
1. Reference Environment Variables: Use the exact syntax {{VARIABLE_NAME}}. Do NOT wrap it in quotes (e.g., use 'apiKey = {{SERVICE_API_KEY}}' not 'apiKey = "{{SERVICE_API_KEY}}"'). Our system replaces these placeholders before execution.
|
||||
2. Reference Input Parameters/Workflow Variables: Use the exact syntax <variable_name>. Do NOT wrap it in quotes (e.g., use 'userId = <userId>;' not 'userId = "<userId>";'). This includes parameters defined in the block's schema and outputs from previous blocks.
|
||||
3. Function Body ONLY: Do NOT include the function signature (e.g., 'async function myFunction() {' or the surrounding '}').
|
||||
4. Imports: Do NOT include import/require statements unless they are standard Node.js built-in modules (e.g., 'crypto', 'fs'). External libraries are not supported in this context.
|
||||
5. Output: Ensure the code returns a value if the function is expected to produce output. Use 'return'.
|
||||
6. Clarity: Write clean, readable code.
|
||||
7. No Explanations: Do NOT include markdown formatting, comments explaining the rules, or any text other than the raw JavaScript code for the function body.
|
||||
|
||||
Example Scenario:
|
||||
User Prompt: "Fetch user data from an API. Use the User ID passed in as 'userId' and an API Key stored as the 'SERVICE_API_KEY' environment variable."
|
||||
|
||||
Generated Code:
|
||||
const userId = <block.content>; // Correct: Accessing input parameter without quotes
|
||||
const apiKey = {{SERVICE_API_KEY}}; // Correct: Accessing environment variable without quotes
|
||||
const url = \`https://api.example.com/users/\${userId}\`;
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Authorization': \`Bearer \${apiKey}\`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Throwing an error will mark the block execution as failed
|
||||
throw new Error(\`API request failed with status \${response.status}: \${await response.text()}\`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
console.log('User data fetched successfully.'); // Optional: logging for debugging
|
||||
return data; // Return the fetched data which becomes the block's output
|
||||
} catch (error) {
|
||||
console.error(\`Error fetching user data: \${error.message}\`);
|
||||
// Re-throwing the error ensures the workflow knows this step failed.
|
||||
throw error;
|
||||
}`,
|
||||
placeholder: 'Describe the JavaScript function to generate...',
|
||||
generationType: 'javascript-function-body',
|
||||
},
|
||||
currentValue: functionCode,
|
||||
onGeneratedContent: (content) => {
|
||||
handleFunctionCodeChange(content) // Use existing handler to also trigger dropdown checks
|
||||
setCodeError(null) // Clear error on successful generation
|
||||
@@ -532,14 +659,12 @@ export function CustomToolModal({
|
||||
<div className='relative flex-1 overflow-auto px-6 pt-6 pb-12'>
|
||||
{/* Schema Section AI Prompt Bar */}
|
||||
{activeSection === 'schema' && (
|
||||
<CodePromptBar
|
||||
<WandPromptBar
|
||||
isVisible={schemaGeneration.isPromptVisible}
|
||||
isLoading={schemaGeneration.isLoading}
|
||||
isStreaming={schemaGeneration.isStreaming}
|
||||
promptValue={schemaGeneration.promptInputValue}
|
||||
onSubmit={(prompt: string) =>
|
||||
schemaGeneration.generateStream({ prompt, context: jsonSchema })
|
||||
}
|
||||
onSubmit={(prompt: string) => schemaGeneration.generateStream({ prompt })}
|
||||
onCancel={
|
||||
schemaGeneration.isStreaming
|
||||
? schemaGeneration.cancelGeneration
|
||||
@@ -553,14 +678,12 @@ export function CustomToolModal({
|
||||
|
||||
{/* Code Section AI Prompt Bar */}
|
||||
{activeSection === 'code' && (
|
||||
<CodePromptBar
|
||||
<WandPromptBar
|
||||
isVisible={codeGeneration.isPromptVisible}
|
||||
isLoading={codeGeneration.isLoading}
|
||||
isStreaming={codeGeneration.isStreaming}
|
||||
promptValue={codeGeneration.promptInputValue}
|
||||
onSubmit={(prompt: string) =>
|
||||
codeGeneration.generateStream({ prompt, context: functionCode })
|
||||
}
|
||||
onSubmit={(prompt: string) => codeGeneration.generateStream({ prompt })}
|
||||
onCancel={
|
||||
codeGeneration.isStreaming
|
||||
? codeGeneration.cancelGeneration
|
||||
|
||||
@@ -314,6 +314,10 @@ function CodeSyncWrapper({
|
||||
language={uiComponent.language}
|
||||
generationType={uiComponent.generationType}
|
||||
disabled={disabled}
|
||||
wandConfig={{
|
||||
enabled: false,
|
||||
prompt: '',
|
||||
}}
|
||||
/>
|
||||
</GenericSyncWrapper>
|
||||
)
|
||||
|
||||
@@ -181,6 +181,13 @@ export function SubBlock({
|
||||
previewValue={previewValue}
|
||||
disabled={isDisabled}
|
||||
onValidationChange={handleValidationChange}
|
||||
wandConfig={
|
||||
config.wandConfig || {
|
||||
enabled: false,
|
||||
prompt: '',
|
||||
placeholder: '',
|
||||
}
|
||||
}
|
||||
/>
|
||||
)
|
||||
case 'switch':
|
||||
|
||||
@@ -1,315 +0,0 @@
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
|
||||
// A single message in the generation conversation history sent to the API.
interface ChatMessage {
  role: 'user' | 'assistant' | 'system'
  content: string
}

// The kinds of artifacts the /api/codegen endpoint knows how to produce.
type GenerationType =
  | 'json-schema'
  | 'javascript-function-body'
  | 'typescript-function-body'
  | 'custom-tool-schema'
  | 'json-object'

/** Props for the useCodeGeneration hook. */
interface UseCodeGenerationProps {
  // Which kind of artifact the API should generate.
  generationType: GenerationType
  initialContext?: string // Optional initial code/schema
  // Called with the complete generated content once generation finishes.
  onGeneratedContent: (content: string) => void
  // Called with each incremental chunk while streaming.
  onStreamChunk?: (chunk: string) => void
  // Called once before the first streamed chunk arrives.
  onStreamStart?: () => void
  onGenerationComplete?: (prompt: string, generatedContent: string) => void // New callback
}

/** Arguments accepted by generate / generateStream. */
interface GenerateOptions {
  prompt: string
  context?: string // Overrides initialContext if provided
}

const logger = createLogger('useCodeGeneration')
|
||||
|
||||
export function useCodeGeneration({
|
||||
generationType,
|
||||
initialContext = '',
|
||||
onGeneratedContent,
|
||||
onStreamChunk,
|
||||
onStreamStart,
|
||||
onGenerationComplete,
|
||||
}: UseCodeGenerationProps) {
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
const [isPromptOpen, setIsPromptOpen] = useState(false)
|
||||
const [isPromptVisible, setIsPromptVisible] = useState(false)
|
||||
const [promptInputValue, setPromptInputValue] = useState('')
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
const [isStreaming, setIsStreaming] = useState(false)
|
||||
|
||||
// State for conversation history
|
||||
const [conversationHistory, setConversationHistory] = useState<ChatMessage[]>([])
|
||||
|
||||
// Use useRef for the abort controller
|
||||
const abortControllerRef = useRef<AbortController | null>(null)
|
||||
|
||||
// Standard non-streaming generation
|
||||
const generate = async ({ prompt, context }: GenerateOptions) => {
|
||||
console.log('[useCodeGeneration.ts] generate function called')
|
||||
if (!prompt) {
|
||||
const errorMessage = 'Prompt cannot be empty.'
|
||||
setError(errorMessage)
|
||||
return
|
||||
}
|
||||
|
||||
setIsLoading(true)
|
||||
setError(null)
|
||||
logger.debug('Starting code generation', { generationType, prompt })
|
||||
setPromptInputValue('')
|
||||
|
||||
// Keep track of the current prompt for history
|
||||
const currentPrompt = prompt
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codegen', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
prompt,
|
||||
generationType,
|
||||
context: context ?? initialContext, // Use override context if available
|
||||
history: conversationHistory, // Send history
|
||||
}),
|
||||
})
|
||||
|
||||
const result = await response.json()
|
||||
|
||||
if (!response.ok || !result.success) {
|
||||
throw new Error(result.error || `HTTP error! status: ${response.status}`)
|
||||
}
|
||||
|
||||
logger.info('Code generation successful', { generationType })
|
||||
onGeneratedContent(result.generatedContent)
|
||||
setIsPromptOpen(false)
|
||||
setIsPromptVisible(false)
|
||||
|
||||
// Update history after successful non-streaming generation
|
||||
setConversationHistory((prevHistory) => [
|
||||
...prevHistory,
|
||||
{ role: 'user', content: currentPrompt },
|
||||
{ role: 'assistant', content: result.generatedContent },
|
||||
])
|
||||
|
||||
if (onGenerationComplete) {
|
||||
onGenerationComplete(currentPrompt, result.generatedContent)
|
||||
}
|
||||
} catch (err: any) {
|
||||
const errorMessage = err.message || 'An unknown error occurred during generation.'
|
||||
logger.error('Code generation failed', { error: errorMessage })
|
||||
setError(errorMessage)
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
}
|
||||
}
|
||||
|
||||
// Streaming generation
|
||||
const generateStream = async ({ prompt, context }: GenerateOptions) => {
|
||||
if (!prompt) {
|
||||
const errorMessage = 'Prompt cannot be empty.'
|
||||
setError(errorMessage)
|
||||
return
|
||||
}
|
||||
|
||||
setIsLoading(true)
|
||||
setIsStreaming(true)
|
||||
setError(null)
|
||||
setPromptInputValue('')
|
||||
|
||||
// Keep track of the current prompt for history
|
||||
const currentPrompt = prompt
|
||||
|
||||
// Create a new AbortController for this request
|
||||
abortControllerRef.current = new AbortController()
|
||||
|
||||
logger.debug('Starting streaming code generation', { generationType, prompt })
|
||||
|
||||
try {
|
||||
const response = await fetch('/api/codegen', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Cache-Control': 'no-cache, no-transform',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
prompt,
|
||||
generationType,
|
||||
context: context ?? initialContext,
|
||||
stream: true,
|
||||
history: conversationHistory, // Send history
|
||||
}),
|
||||
signal: abortControllerRef.current.signal,
|
||||
// Ensure no caching for Edge Functions
|
||||
cache: 'no-store',
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
throw new Error(errorText || `HTTP error! status: ${response.status}`)
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
throw new Error('Response body is null')
|
||||
}
|
||||
|
||||
// Signal the start of the stream to clear previous content
|
||||
if (onStreamStart) {
|
||||
onStreamStart()
|
||||
}
|
||||
|
||||
// Set up streaming reader
|
||||
const reader = response.body.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
let fullContent = ''
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
// Process incoming chunks
|
||||
const text = decoder.decode(value)
|
||||
const lines = text.split('\n').filter((line) => line.trim() !== '')
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const data = JSON.parse(line)
|
||||
|
||||
// Check if there's an error
|
||||
if (data.error) {
|
||||
throw new Error(data.error)
|
||||
}
|
||||
|
||||
// Process chunk
|
||||
if (data.chunk) {
|
||||
fullContent += data.chunk
|
||||
if (onStreamChunk) {
|
||||
onStreamChunk(data.chunk)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if streaming is complete
|
||||
if (data.done) {
|
||||
// Use full content from server if available (for validation)
|
||||
if (data.fullContent) {
|
||||
fullContent = data.fullContent
|
||||
}
|
||||
|
||||
logger.info('Streaming code generation completed', { generationType })
|
||||
// Update history AFTER the stream is fully complete
|
||||
setConversationHistory((prevHistory) => [
|
||||
...prevHistory,
|
||||
{ role: 'user', content: currentPrompt },
|
||||
{ role: 'assistant', content: fullContent }, // Use the final full content
|
||||
])
|
||||
|
||||
// Call the main handler for the complete content
|
||||
onGeneratedContent(fullContent)
|
||||
|
||||
if (onGenerationComplete) {
|
||||
onGenerationComplete(currentPrompt, fullContent)
|
||||
}
|
||||
break
|
||||
}
|
||||
} catch (jsonError: any) {
|
||||
logger.error('Failed to parse streaming response', { error: jsonError.message, line })
|
||||
}
|
||||
}
|
||||
}
|
||||
} catch (streamError: any) {
|
||||
// Additional error handling for stream processing
|
||||
if (streamError.name !== 'AbortError') {
|
||||
logger.error('Error processing stream', { error: streamError.message })
|
||||
throw streamError // Re-throw to be caught by outer try/catch
|
||||
}
|
||||
} finally {
|
||||
// Always release the reader when done
|
||||
reader.releaseLock()
|
||||
}
|
||||
} catch (err: any) {
|
||||
// Don't show error if it was due to an abort
|
||||
if (err.name === 'AbortError') {
|
||||
logger.info('Streaming code generation aborted', { generationType })
|
||||
return
|
||||
}
|
||||
|
||||
const errorMessage = err.message || 'An unknown error occurred during streaming.'
|
||||
logger.error('Streaming code generation failed', { error: errorMessage })
|
||||
setError(errorMessage)
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
setIsStreaming(false)
|
||||
abortControllerRef.current = null
|
||||
}
|
||||
}
|
||||
|
||||
const cancelGeneration = () => {
|
||||
if (abortControllerRef.current) {
|
||||
abortControllerRef.current.abort()
|
||||
abortControllerRef.current = null
|
||||
setIsLoading(false)
|
||||
setIsStreaming(false)
|
||||
logger.info('Code generation canceled', { generationType })
|
||||
}
|
||||
}
|
||||
|
||||
const openPrompt = () => {
|
||||
setIsPromptOpen(true)
|
||||
setPromptInputValue('')
|
||||
}
|
||||
|
||||
const closePrompt = () => {
|
||||
if (isLoading) return
|
||||
setIsPromptOpen(false)
|
||||
setPromptInputValue('')
|
||||
}
|
||||
|
||||
const showPromptInline = () => {
|
||||
logger.debug('showPromptInline called', { generationType })
|
||||
setIsPromptVisible(true)
|
||||
setPromptInputValue('')
|
||||
}
|
||||
|
||||
const hidePromptInline = () => {
|
||||
logger.debug('hidePromptInline called', { generationType })
|
||||
if (isLoading) return
|
||||
setIsPromptVisible(false)
|
||||
setPromptInputValue('')
|
||||
}
|
||||
|
||||
const updatePromptValue = (value: string) => {
|
||||
setPromptInputValue(value)
|
||||
}
|
||||
|
||||
const clearHistory = useCallback(() => {
|
||||
setConversationHistory([])
|
||||
logger.info('Conversation history cleared', { generationType })
|
||||
}, [generationType])
|
||||
|
||||
return {
|
||||
isLoading,
|
||||
isStreaming,
|
||||
error,
|
||||
generate,
|
||||
generateStream,
|
||||
cancelGeneration,
|
||||
isPromptOpen,
|
||||
openPrompt,
|
||||
closePrompt,
|
||||
isPromptVisible,
|
||||
showPromptInline,
|
||||
hidePromptInline,
|
||||
promptInputValue,
|
||||
updatePromptValue,
|
||||
conversationHistory,
|
||||
clearHistory,
|
||||
}
|
||||
}
|
||||
@@ -0,0 +1,297 @@
|
||||
import { useCallback, useRef, useState } from 'react'
|
||||
import { createLogger } from '@/lib/logs/console/logger'
|
||||
import type { GenerationType } from '@/blocks/types'
|
||||
|
||||
const logger = createLogger('useWand')
|
||||
|
||||
/**
|
||||
* Builds rich context information based on current content and generation type
|
||||
*/
|
||||
function buildContextInfo(currentValue?: string, generationType?: string): string {
|
||||
if (!currentValue || currentValue.trim() === '') {
|
||||
return 'no current content'
|
||||
}
|
||||
|
||||
const contentLength = currentValue.length
|
||||
const lineCount = currentValue.split('\n').length
|
||||
|
||||
let contextInfo = `Current content (${contentLength} characters, ${lineCount} lines):\n${currentValue}`
|
||||
|
||||
// Add type-specific context analysis
|
||||
if (generationType) {
|
||||
switch (generationType) {
|
||||
case 'javascript-function-body':
|
||||
case 'typescript-function-body': {
|
||||
// Analyze code structure
|
||||
const hasFunction = /function\s+\w+/.test(currentValue)
|
||||
const hasArrowFunction = /=>\s*{/.test(currentValue)
|
||||
const hasReturn = /return\s+/.test(currentValue)
|
||||
contextInfo += `\n\nCode analysis: ${hasFunction ? 'Contains function declaration. ' : ''}${hasArrowFunction ? 'Contains arrow function. ' : ''}${hasReturn ? 'Has return statement.' : 'No return statement.'}`
|
||||
break
|
||||
}
|
||||
|
||||
case 'json-schema':
|
||||
case 'json-object':
|
||||
// Analyze JSON structure
|
||||
try {
|
||||
const parsed = JSON.parse(currentValue)
|
||||
const keys = Object.keys(parsed)
|
||||
contextInfo += `\n\nJSON analysis: Valid JSON with ${keys.length} top-level keys: ${keys.join(', ')}`
|
||||
} catch {
|
||||
contextInfo += `\n\nJSON analysis: Invalid JSON - needs fixing`
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
|
||||
return contextInfo
|
||||
}
|
||||
|
||||
// A single message in the wand conversation history sent to the API.
interface ChatMessage {
  role: 'user' | 'assistant' | 'system'
  content: string
}

/** Per-subblock configuration for the AI "wand" generation feature. */
export interface WandConfig {
  // Whether wand generation is available for this subblock.
  enabled: boolean
  // System prompt template; a '{context}' placeholder, if present, is
  // replaced with info derived from the field's current value.
  prompt: string
  generationType?: GenerationType
  placeholder?: string
  maintainHistory?: boolean // Whether to keep conversation history
}

/** Props for the useWand hook. */
interface UseWandProps {
  wandConfig: WandConfig
  // Current content of the field, used to build the '{context}' substitution.
  currentValue?: string
  // Called with the full generated content after a successful stream.
  onGeneratedContent: (content: string) => void
  // Called with each incremental chunk while streaming.
  onStreamChunk?: (chunk: string) => void
  // Called once before the first chunk, to clear previous content.
  onStreamStart?: () => void
  onGenerationComplete?: (prompt: string, generatedContent: string) => void
}
|
||||
|
||||
export function useWand({
|
||||
wandConfig,
|
||||
currentValue,
|
||||
onGeneratedContent,
|
||||
onStreamChunk,
|
||||
onStreamStart,
|
||||
onGenerationComplete,
|
||||
}: UseWandProps) {
|
||||
const [isLoading, setIsLoading] = useState(false)
|
||||
const [isPromptVisible, setIsPromptVisible] = useState(false)
|
||||
const [promptInputValue, setPromptInputValue] = useState('')
|
||||
const [error, setError] = useState<string | null>(null)
|
||||
const [isStreaming, setIsStreaming] = useState(false)
|
||||
|
||||
// Conversation history state
|
||||
const [conversationHistory, setConversationHistory] = useState<ChatMessage[]>([])
|
||||
|
||||
const abortControllerRef = useRef<AbortController | null>(null)
|
||||
|
||||
const showPromptInline = useCallback(() => {
|
||||
setIsPromptVisible(true)
|
||||
setError(null)
|
||||
}, [])
|
||||
|
||||
const hidePromptInline = useCallback(() => {
|
||||
setIsPromptVisible(false)
|
||||
setPromptInputValue('')
|
||||
setError(null)
|
||||
}, [])
|
||||
|
||||
const updatePromptValue = useCallback((value: string) => {
|
||||
setPromptInputValue(value)
|
||||
}, [])
|
||||
|
||||
const cancelGeneration = useCallback(() => {
|
||||
if (abortControllerRef.current) {
|
||||
abortControllerRef.current.abort()
|
||||
abortControllerRef.current = null
|
||||
}
|
||||
setIsStreaming(false)
|
||||
setIsLoading(false)
|
||||
setError(null)
|
||||
}, [])
|
||||
|
||||
const openPrompt = useCallback(() => {
|
||||
setIsPromptVisible(true)
|
||||
setPromptInputValue('')
|
||||
}, [])
|
||||
|
||||
const closePrompt = useCallback(() => {
|
||||
if (isLoading) return
|
||||
setIsPromptVisible(false)
|
||||
setPromptInputValue('')
|
||||
}, [isLoading])
|
||||
|
||||
const generateStream = useCallback(
|
||||
async ({ prompt }: { prompt: string }) => {
|
||||
if (!prompt) {
|
||||
setError('Prompt cannot be empty.')
|
||||
return
|
||||
}
|
||||
|
||||
if (!wandConfig.enabled) {
|
||||
setError('Wand is not enabled.')
|
||||
return
|
||||
}
|
||||
|
||||
setIsLoading(true)
|
||||
setIsStreaming(true)
|
||||
setError(null)
|
||||
setPromptInputValue('')
|
||||
|
||||
abortControllerRef.current = new AbortController()
|
||||
|
||||
// Signal the start of streaming to clear previous content
|
||||
if (onStreamStart) {
|
||||
onStreamStart()
|
||||
}
|
||||
|
||||
try {
|
||||
// Build context-aware message
|
||||
const contextInfo = buildContextInfo(currentValue, wandConfig.generationType)
|
||||
|
||||
// Build the system prompt with context information
|
||||
let systemPrompt = wandConfig.prompt
|
||||
if (systemPrompt.includes('{context}')) {
|
||||
systemPrompt = systemPrompt.replace('{context}', contextInfo)
|
||||
}
|
||||
|
||||
// User message is just the user's specific request
|
||||
const userMessage = prompt
|
||||
|
||||
// Keep track of the current prompt for history
|
||||
const currentPrompt = prompt
|
||||
|
||||
const response = await fetch('/api/wand-generate', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Cache-Control': 'no-cache, no-transform',
|
||||
},
|
||||
body: JSON.stringify({
|
||||
prompt: userMessage,
|
||||
systemPrompt: systemPrompt, // Send the processed system prompt with context
|
||||
stream: true,
|
||||
history: wandConfig.maintainHistory ? conversationHistory : [], // Include history if enabled
|
||||
}),
|
||||
signal: abortControllerRef.current.signal,
|
||||
cache: 'no-store',
|
||||
})
|
||||
|
||||
if (!response.ok) {
|
||||
const errorText = await response.text()
|
||||
throw new Error(errorText || `HTTP error! status: ${response.status}`)
|
||||
}
|
||||
|
||||
if (!response.body) {
|
||||
throw new Error('Response body is null')
|
||||
}
|
||||
|
||||
const reader = response.body.getReader()
|
||||
const decoder = new TextDecoder()
|
||||
let accumulatedContent = ''
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read()
|
||||
if (done) break
|
||||
|
||||
// Process incoming chunks
|
||||
const text = decoder.decode(value)
|
||||
const lines = text.split('\n').filter((line) => line.trim() !== '')
|
||||
|
||||
for (const line of lines) {
|
||||
try {
|
||||
const data = JSON.parse(line)
|
||||
|
||||
// Check if there's an error
|
||||
if (data.error) {
|
||||
throw new Error(data.error)
|
||||
}
|
||||
|
||||
// Process chunk
|
||||
if (data.chunk && !data.done) {
|
||||
accumulatedContent += data.chunk
|
||||
// Stream each chunk to the UI immediately
|
||||
if (onStreamChunk) {
|
||||
onStreamChunk(data.chunk)
|
||||
}
|
||||
}
|
||||
|
||||
// Check if streaming is complete
|
||||
if (data.done) {
|
||||
break
|
||||
}
|
||||
} catch (parseError) {
|
||||
// Continue processing other lines
|
||||
logger.debug('Failed to parse streaming line', { line, parseError })
|
||||
}
|
||||
}
|
||||
}
|
||||
} finally {
|
||||
reader.releaseLock()
|
||||
}
|
||||
|
||||
if (accumulatedContent) {
|
||||
onGeneratedContent(accumulatedContent)
|
||||
|
||||
// Update conversation history if enabled
|
||||
if (wandConfig.maintainHistory) {
|
||||
setConversationHistory((prev) => [
|
||||
...prev,
|
||||
{ role: 'user', content: currentPrompt },
|
||||
{ role: 'assistant', content: accumulatedContent },
|
||||
])
|
||||
}
|
||||
|
||||
// Call completion callback
|
||||
if (onGenerationComplete) {
|
||||
onGenerationComplete(currentPrompt, accumulatedContent)
|
||||
}
|
||||
}
|
||||
|
||||
logger.debug('Wand generation completed', {
|
||||
prompt,
|
||||
contentLength: accumulatedContent.length,
|
||||
})
|
||||
} catch (error: any) {
|
||||
if (error.name === 'AbortError') {
|
||||
logger.debug('Wand generation cancelled')
|
||||
} else {
|
||||
logger.error('Wand generation failed', { error })
|
||||
setError(error.message || 'Generation failed')
|
||||
}
|
||||
} finally {
|
||||
setIsLoading(false)
|
||||
setIsStreaming(false)
|
||||
abortControllerRef.current = null
|
||||
}
|
||||
},
|
||||
[
|
||||
wandConfig,
|
||||
currentValue,
|
||||
onGeneratedContent,
|
||||
onStreamChunk,
|
||||
onStreamStart,
|
||||
onGenerationComplete,
|
||||
]
|
||||
)
|
||||
|
||||
return {
|
||||
isLoading,
|
||||
isStreaming,
|
||||
isPromptVisible,
|
||||
promptInputValue,
|
||||
error,
|
||||
conversationHistory,
|
||||
generateStream,
|
||||
showPromptInline,
|
||||
hidePromptInline,
|
||||
openPrompt,
|
||||
closePrompt,
|
||||
updatePromptValue,
|
||||
cancelGeneration,
|
||||
}
|
||||
}
|
||||
@@ -69,6 +69,42 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
|
||||
layout: 'full',
|
||||
placeholder: 'Enter system prompt...',
|
||||
rows: 5,
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true, // Enable conversation history for iterative improvements
|
||||
prompt: `You are an expert at writing system prompts for AI agents. Write a system prompt based exactly on what the user asks for.
|
||||
|
||||
Current context: {context}
|
||||
|
||||
IMPORTANT: Write the system prompt as if the user asked you directly to create it. Match their level of detail and complexity. If they ask for something "comprehensive" or "detailed", write a thorough, in-depth prompt. If they ask for something "simple", keep it concise.
|
||||
|
||||
Key guidelines:
|
||||
- Always start with "You are..." to define the agent's role
|
||||
- Include everything the user specifically requests
|
||||
- If they mention specific tools (like "use Exa to search", "send emails via Gmail", "post to Slack"), explicitly include those tool usage instructions in the prompt
|
||||
- If they want extensive capabilities, write extensively about them
|
||||
- If they mention specific behaviors, tone, or constraints, include those
|
||||
- Write naturally - don't worry about sentence counts or rigid structure
|
||||
- Focus on being comprehensive when they ask for comprehensive
|
||||
|
||||
Tool Integration: Since this is an AI agent platform, users often want agents that use specific tools. If the user mentions:
|
||||
- Web search → Include instructions about using search tools like Exa
|
||||
- Email → Include instructions about Gmail integration
|
||||
- Communication → Include Slack, Discord, Teams instructions
|
||||
- Data → Include instructions about databases, APIs, spreadsheets
|
||||
- Any other specific tools → Include explicit usage instructions
|
||||
|
||||
Examples:
|
||||
SIMPLE REQUEST: "Write a basic customer service agent"
|
||||
You are a helpful customer service representative. Assist customers with their questions about orders, returns, and products. Be polite and professional in all interactions.
|
||||
|
||||
COMPREHENSIVE REQUEST: "Create a detailed AI research assistant that can search the web and analyze information"
|
||||
You are an advanced AI research assistant specializing in conducting thorough research and analysis across various topics. Your primary capabilities include web searching, information synthesis, critical analysis, and presenting findings in clear, actionable formats. When conducting research, use Exa or other web search tools to gather current, relevant information from authoritative sources. Always verify information from multiple sources when possible and clearly distinguish between established facts and emerging trends or opinions. For each research query, begin by understanding the specific research objectives, target audience, and desired depth of analysis. Structure your research process systematically: start with broad topic exploration, then narrow down to specific aspects, and finally synthesize findings into coherent insights. When presenting results, include source citations, highlight key findings, note any limitations or gaps in available information, and suggest areas for further investigation. Adapt your communication style to match the user's expertise level - provide detailed technical explanations for expert audiences and clear, accessible summaries for general audiences. Always maintain objectivity and acknowledge when information is uncertain or conflicting.
|
||||
|
||||
Write naturally and comprehensively based on what the user actually asks for.`,
|
||||
placeholder: 'Describe the AI agent you want to create...',
|
||||
generationType: 'system-prompt',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'userPrompt',
|
||||
@@ -202,7 +238,97 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
|
||||
layout: 'full',
|
||||
placeholder: 'Enter JSON schema...',
|
||||
language: 'json',
|
||||
generationType: 'json-schema',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert programmer specializing in creating JSON schemas according to a specific format.
|
||||
Generate ONLY the JSON schema based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
The JSON object MUST have the following top-level properties: 'name' (string), 'description' (string), 'strict' (boolean, usually true), and 'schema' (object).
|
||||
The 'schema' object must define the structure and MUST contain 'type': 'object', 'properties': {...}, 'additionalProperties': false, and 'required': [...].
|
||||
Inside 'properties', use standard JSON Schema properties (type, description, enum, items for arrays, etc.).
|
||||
|
||||
Current schema: {context}
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
Valid Schema Examples:
|
||||
|
||||
Example 1:
|
||||
{
|
||||
"name": "reddit_post",
|
||||
"description": "Fetches the reddit posts in the given subreddit",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"title": {
|
||||
"type": "string",
|
||||
"description": "The title of the post"
|
||||
},
|
||||
"content": {
|
||||
"type": "string",
|
||||
"description": "The content of the post"
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": [ "title", "content" ]
|
||||
}
|
||||
}
|
||||
|
||||
Example 2:
|
||||
{
|
||||
"name": "get_weather",
|
||||
"description": "Fetches the current weather for a specific location.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"location": {
|
||||
"type": "string",
|
||||
"description": "The city and state, e.g., San Francisco, CA"
|
||||
},
|
||||
"unit": {
|
||||
"type": "string",
|
||||
"description": "Temperature unit",
|
||||
"enum": ["celsius", "fahrenheit"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["location", "unit"]
|
||||
}
|
||||
}
|
||||
|
||||
Example 3 (Array Input):
|
||||
{
|
||||
"name": "process_items",
|
||||
"description": "Processes a list of items with specific IDs.",
|
||||
"strict": true,
|
||||
"schema": {
|
||||
"type": "object",
|
||||
"properties": {
|
||||
"item_ids": {
|
||||
"type": "array",
|
||||
"description": "A list of unique item identifiers to process.",
|
||||
"items": {
|
||||
"type": "string",
|
||||
"description": "An item ID"
|
||||
}
|
||||
},
|
||||
"processing_mode": {
|
||||
"type": "string",
|
||||
"description": "The mode for processing",
|
||||
"enum": ["fast", "thorough"]
|
||||
}
|
||||
},
|
||||
"additionalProperties": false,
|
||||
"required": ["item_ids", "processing_mode"]
|
||||
}
|
||||
}
|
||||
`,
|
||||
placeholder: 'Describe the JSON schema structure you need...',
|
||||
generationType: 'json-schema',
|
||||
},
|
||||
},
|
||||
],
|
||||
tools: {
|
||||
|
||||
@@ -55,6 +55,30 @@ export const ApiBlock: BlockConfig<RequestResponse> = {
|
||||
type: 'code',
|
||||
layout: 'full',
|
||||
placeholder: 'Enter JSON...',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert JSON programmer.
|
||||
Generate ONLY the raw JSON object based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
|
||||
Current body: {context}
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
You have access to the following variables you can use to generate the JSON body:
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Example:
|
||||
{
|
||||
"name": "<block.agent.response.content>",
|
||||
"age": <block.function.output.age>,
|
||||
"success": true
|
||||
}`,
|
||||
placeholder: 'Describe the API request body you need...',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
],
|
||||
tools: {
|
||||
|
||||
@@ -17,6 +17,59 @@ export const FunctionBlock: BlockConfig<CodeExecutionOutput> = {
|
||||
id: 'code',
|
||||
type: 'code',
|
||||
layout: 'full',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert JavaScript programmer.
|
||||
Generate ONLY the raw body of a JavaScript function based on the user's request.
|
||||
The code should be executable within an 'async function(params, environmentVariables) {...}' context.
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Current code context: {context}
|
||||
|
||||
IMPORTANT FORMATTING RULES:
|
||||
1. Reference Environment Variables: Use the exact syntax {{VARIABLE_NAME}}. Do NOT wrap it in quotes (e.g., use 'apiKey = {{SERVICE_API_KEY}}' not 'apiKey = "{{SERVICE_API_KEY}}"'). Our system replaces these placeholders before execution.
|
||||
2. Reference Input Parameters/Workflow Variables: Use the exact syntax <variable_name>. Do NOT wrap it in quotes (e.g., use 'userId = <userId>;' not 'userId = "<userId>";'). This includes parameters defined in the block's schema and outputs from previous blocks.
|
||||
3. Function Body ONLY: Do NOT include the function signature (e.g., 'async function myFunction() {' or the surrounding '}').
|
||||
4. Imports: Do NOT include import/require statements unless they are standard Node.js built-in modules (e.g., 'crypto', 'fs'). External libraries are not supported in this context.
|
||||
5. Output: Ensure the code returns a value if the function is expected to produce output. Use 'return'.
|
||||
6. Clarity: Write clean, readable code.
|
||||
7. No Explanations: Do NOT include markdown formatting, comments explaining the rules, or any text other than the raw JavaScript code for the function body.
|
||||
|
||||
Example Scenario:
|
||||
User Prompt: "Fetch user data from an API. Use the User ID passed in as 'userId' and an API Key stored as the 'SERVICE_API_KEY' environment variable."
|
||||
|
||||
Generated Code:
|
||||
const userId = <block.content>; // Correct: Accessing input parameter without quotes
|
||||
const apiKey = {{SERVICE_API_KEY}}; // Correct: Accessing environment variable without quotes
|
||||
const url = \`https://api.example.com/users/\${userId}\`;
|
||||
|
||||
try {
|
||||
const response = await fetch(url, {
|
||||
method: 'GET',
|
||||
headers: {
|
||||
'Authorization': \`Bearer \${apiKey}\`,
|
||||
'Content-Type': 'application/json'
|
||||
}
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
// Throwing an error will mark the block execution as failed
|
||||
throw new Error(\`API request failed with status \${response.status}: \${await response.text()}\`);
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
console.log('User data fetched successfully.'); // Optional: logging for debugging
|
||||
return data; // Return the fetched data which becomes the block's output
|
||||
} catch (error) {
|
||||
console.error(\`Error fetching user data: \${error.message}\`);
|
||||
// Re-throwing the error ensures the workflow knows this step failed.
|
||||
throw error;
|
||||
}`,
|
||||
placeholder: 'Describe the function you want to create...',
|
||||
generationType: 'javascript-function-body',
|
||||
},
|
||||
},
|
||||
],
|
||||
tools: {
|
||||
|
||||
@@ -41,10 +41,33 @@ export const ResponseBlock: BlockConfig<ResponseBlockOutput> = {
|
||||
layout: 'full',
|
||||
placeholder: '{\n "message": "Hello world",\n "userId": "<variable.userId>"\n}',
|
||||
language: 'json',
|
||||
generationType: 'json-object',
|
||||
condition: { field: 'dataMode', value: 'json' },
|
||||
description:
|
||||
'Data that will be sent as the response body on API calls. Use <variable.name> to reference workflow variables.',
|
||||
wandConfig: {
|
||||
enabled: true,
|
||||
maintainHistory: true,
|
||||
prompt: `You are an expert JSON programmer.
|
||||
Generate ONLY the raw JSON object based on the user's request.
|
||||
The output MUST be a single, valid JSON object, starting with { and ending with }.
|
||||
|
||||
Current response: {context}
|
||||
|
||||
Do not include any explanations, markdown formatting, or other text outside the JSON object.
|
||||
|
||||
You have access to the following variables you can use to generate the JSON body:
|
||||
- 'params' (object): Contains input parameters derived from the JSON schema. Access these directly using the parameter name wrapped in angle brackets, e.g., '<paramName>'. Do NOT use 'params.paramName'.
|
||||
- 'environmentVariables' (object): Contains environment variables. Reference these using the double curly brace syntax: '{{ENV_VAR_NAME}}'. Do NOT use 'environmentVariables.VAR_NAME' or env.
|
||||
|
||||
Example:
|
||||
{
|
||||
"name": "<block.agent.response.content>",
|
||||
"age": <block.function.output.age>,
|
||||
"success": true
|
||||
}`,
|
||||
placeholder: 'Describe the API response structure you need...',
|
||||
generationType: 'json-object',
|
||||
},
|
||||
},
|
||||
{
|
||||
id: 'status',
|
||||
|
||||
@@ -9,6 +9,15 @@ export type PrimitiveValueType = 'string' | 'number' | 'boolean' | 'json' | 'any
|
||||
// Block classification
|
||||
export type BlockCategory = 'blocks' | 'tools' | 'triggers'
|
||||
|
||||
// Valid generation types for AI assistance
|
||||
export type GenerationType =
|
||||
| 'javascript-function-body'
|
||||
| 'typescript-function-body'
|
||||
| 'json-schema'
|
||||
| 'json-object'
|
||||
| 'system-prompt'
|
||||
| 'custom-tool-schema'
|
||||
|
||||
// SubBlock types
|
||||
export type SubBlockType =
|
||||
| 'short-input' // Single line input
|
||||
@@ -117,7 +126,7 @@ export interface SubBlockConfig {
|
||||
}
|
||||
// Props specific to 'code' sub-block type
|
||||
language?: 'javascript' | 'json'
|
||||
generationType?: 'javascript-function-body' | 'json-schema' | 'json-object'
|
||||
generationType?: GenerationType
|
||||
// OAuth specific properties
|
||||
provider?: string
|
||||
serviceId?: string
|
||||
@@ -135,6 +144,14 @@ export interface SubBlockConfig {
|
||||
rows?: number
|
||||
// Multi-select functionality
|
||||
multiSelect?: boolean
|
||||
// Wand configuration for AI assistance
|
||||
wandConfig?: {
|
||||
enabled: boolean
|
||||
prompt: string // Custom prompt template for this subblock
|
||||
generationType?: GenerationType // Optional custom generation type
|
||||
placeholder?: string // Custom placeholder for the prompt input
|
||||
maintainHistory?: boolean // Whether to maintain conversation history
|
||||
}
|
||||
}
|
||||
|
||||
// Main block definition
|
||||
|
||||
Reference in New Issue
Block a user