fix(memories): simplified memories, added memories subblock for agent (#432)

* fix(memories): simplified memories, added memories subblock for agent

* removed raw data from memory block, simplified memory config

* fix(persistence): persist collapsed state for responseFormat code subblock (#429)

* fix(persistence): persist collapsed state for responseFormat code subblock

* add additional type safety

* acknowledged PR comments
This commit is contained in:
Waleed Latif
2025-05-28 17:48:43 -07:00
committed by GitHub
parent 2fb0894c2a
commit ca6884cdc2
13 changed files with 668 additions and 474 deletions

View File

@@ -43,17 +43,15 @@ Create persistent storage for data that needs to be accessed across multiple wor
### `memory_add`
Add a new memory to the database or append to existing memory with the same ID. When appending to existing memory, the memory types must match.
Add a new memory to the database or append to existing memory with the same ID.
#### Input
| Parameter | Type | Required | Description |
| --------- | ---- | -------- | ----------- |
| `id` | string | Yes | Identifier for the memory. If a memory with this ID already exists, the new data will be appended to it. |
| `type` | string | Yes | Type of memory \(agent or raw\) |
| `role` | string | No | Role for agent memory \(user, assistant, or system\) |
| `content` | string | No | Content for agent memory |
| `rawData` | json | No | Raw data to store \(JSON format\) |
| `role` | string | Yes | Role for agent memory \(user, assistant, or system\) |
| `content` | string | Yes | Content for agent memory |
#### Output

View File

@@ -96,8 +96,8 @@ export async function GET(request: NextRequest) {
* POST handler for creating new memories
* Requires:
* - key: Unique identifier for the memory (within workflow scope)
* - type: Memory type ('agent' or 'raw')
* - data: Memory content (varies by type)
* - type: Memory type ('agent')
* - data: Memory content (agent message with role and content)
* - workflowId: ID of the workflow this memory belongs to
*/
export async function POST(request: NextRequest) {
@@ -124,13 +124,13 @@ export async function POST(request: NextRequest) {
)
}
if (!type || !['agent', 'raw'].includes(type)) {
if (!type || type !== 'agent') {
logger.warn(`[${requestId}] Invalid memory type: ${type}`)
return NextResponse.json(
{
success: false,
error: {
message: 'Valid memory type (agent or raw) is required',
message: 'Memory type must be "agent"',
},
},
{ status: 400 }
@@ -220,30 +220,20 @@ export async function POST(request: NextRequest) {
)
}
// Handle appending based on memory type
// Handle appending for agent type
let updatedData
if (type === 'agent') {
// For agent type
const newMessage = data
const existingData = existingMemory[0].data
// For agent type
const newMessage = data
const existingData = existingMemory[0].data
// If existing data is an array, append to it
if (Array.isArray(existingData)) {
updatedData = [...existingData, newMessage]
}
// If existing data is a single message object, convert to array
else {
updatedData = [existingData, newMessage]
}
} else {
// For raw type
// Merge objects if they're objects, otherwise use the new data
if (typeof existingMemory[0].data === 'object' && typeof data === 'object') {
updatedData = { ...existingMemory[0].data, ...data }
} else {
updatedData = data
}
// If existing data is an array, append to it
if (Array.isArray(existingData)) {
updatedData = [...existingData, newMessage]
}
// If existing data is a single message object, convert to array
else {
updatedData = [existingData, newMessage]
}
// Update the existing memory with appended data
@@ -263,7 +253,7 @@ export async function POST(request: NextRequest) {
workflowId,
key,
type,
data: type === 'agent' ? (Array.isArray(data) ? data : [data]) : data,
data: Array.isArray(data) ? data : [data],
createdAt: new Date(),
updatedAt: new Date(),
}

View File

@@ -30,7 +30,6 @@ import { Input } from '@/components/ui/input'
import { Label } from '@/components/ui/label'
import { Skeleton } from '@/components/ui/skeleton'
import { Textarea } from '@/components/ui/textarea'
import { getNodeEnv } from '@/lib/environment'
import { createLogger } from '@/lib/logs/console-logger'
import { getBaseDomain } from '@/lib/urls/utils'
import { cn } from '@/lib/utils'
@@ -54,10 +53,8 @@ interface ChatDeployProps {
type AuthType = 'public' | 'password' | 'email'
const isDevelopment = getNodeEnv() === 'development'
const getDomainSuffix = (() => {
const suffix = isDevelopment ? `.${getBaseDomain()}` : '.simstudio.ai'
const suffix = process.env.NODE_ENV === 'development' ? `.${getBaseDomain()}` : '.simstudio.ai'
return () => suffix
})()

View File

@@ -13,7 +13,8 @@ function handleProviderBasedApiKey(
blockId: string,
subBlockId: string,
modelValue: string | null | undefined,
storeValue: any
storeValue: any,
isModelChange = false
) {
// Only proceed if we have a model selected
if (!modelValue) return
@@ -25,20 +26,25 @@ function handleProviderBasedApiKey(
if (!provider || provider === 'ollama') return
const subBlockStore = useSubBlockStore.getState()
const isAutoFillEnabled = useGeneralStore.getState().isAutoFillEnvVarsEnabled
// Try to get a saved API key for this provider
const savedValue = subBlockStore.resolveToolParamValue(provider, 'apiKey', blockId)
// Try to get a saved API key for this provider (only if auto-fill is enabled)
const savedValue = isAutoFillEnabled
? subBlockStore.resolveToolParamValue(provider, 'apiKey', blockId)
: null
// If we have a valid API key, use it
if (savedValue && savedValue !== '') {
// Always update the value when switching models, even if it appears the same
// This handles cases where the field shows masked values but needs to update
subBlockStore.setValue(blockId, subBlockId, savedValue)
} else {
// Always clear the field when switching to a model with no API key
// Don't wait for user interaction to clear it
// If we have a valid saved API key and auto-fill is enabled, use it
if (savedValue && savedValue !== '' && isAutoFillEnabled) {
// Only update if the current value is different to avoid unnecessary updates
if (storeValue !== savedValue) {
subBlockStore.setValue(blockId, subBlockId, savedValue)
}
} else if (isModelChange && (!storeValue || storeValue === '')) {
// Only clear the field when switching models AND the field is already empty
// Don't clear existing user-entered values on initial load
subBlockStore.setValue(blockId, subBlockId, '')
}
// If no saved value and this is initial load, preserve existing value
}
/**
@@ -237,7 +243,7 @@ export function useSubBlockValue<T = any>(
// Handle different block types
if (isProviderBasedBlock) {
handleProviderBasedApiKey(blockId, subBlockId, modelValue, storeValue)
handleProviderBasedApiKey(blockId, subBlockId, modelValue, storeValue, false)
} else {
// Normal handling for non-provider blocks
handleStandardBlockApiKey(blockId, subBlockId, blockType, storeValue)
@@ -263,27 +269,12 @@ export function useSubBlockValue<T = any>(
// Update the previous model reference
prevModelRef.current = modelValue
// For provider-based blocks, always clear the field if needed
// But only fill with saved values if auto-fill is enabled
// Handle API key auto-fill for model changes
if (modelValue) {
const provider = getProviderFromModel(modelValue)
// Skip if we couldn't determine a provider
if (!provider || provider === 'ollama') return
const subBlockStore = useSubBlockStore.getState()
// Check if there's a saved value for this provider
const savedValue = subBlockStore.resolveToolParamValue(provider, 'apiKey', blockId)
if (savedValue && savedValue !== '' && isAutoFillEnvVarsEnabled) {
// Only auto-fill if the feature is enabled
subBlockStore.setValue(blockId, subBlockId, savedValue)
} else {
// Always clear immediately when switching to a model with no saved key
// or when auto-fill is disabled
subBlockStore.setValue(blockId, subBlockId, '')
}
handleProviderBasedApiKey(blockId, subBlockId, modelValue, storeValue, true)
} else {
// If no model is selected, clear the API key field
useSubBlockStore.getState().setValue(blockId, subBlockId, '')
}
}
}, [

View File

@@ -61,25 +61,22 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
layout: 'full',
placeholder: 'Enter system prompt...',
rows: 5,
mode: 'basic',
},
{
id: 'context',
id: 'userPrompt',
title: 'User Prompt',
type: 'long-input',
layout: 'full',
placeholder: 'Enter context or user message...',
rows: 3,
mode: 'basic',
},
{
id: 'messages',
title: 'Messages',
type: 'code',
id: 'memories',
title: 'Memories',
type: 'short-input',
layout: 'full',
placeholder: 'Connect memory block output...',
mode: 'advanced',
language: 'javascript',
placeholder: '[{"role": "user", "content": "Hello, can you help me with a question?"}]',
},
{
id: 'model',
@@ -236,15 +233,10 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
},
inputs: {
systemPrompt: { type: 'string', required: false },
context: { type: 'string', required: false },
userPrompt: { type: 'string', required: false },
memories: { type: 'json', required: false },
model: { type: 'string', required: true },
apiKey: { type: 'string', required: true },
messages: {
type: 'json',
required: false,
description:
'Array of message objects with role and content fields for advanced chat history control.',
},
responseFormat: {
type: 'json',
required: false,

View File

@@ -48,19 +48,11 @@ export const MemoryBlock: BlockConfig = {
}
if (params.operation === 'add') {
if (!params.type) {
errors.push('Memory type is required for add operation')
} else if (params.type === 'agent') {
if (!params.role) {
errors.push('Role is required for agent memory')
}
if (!params.content) {
errors.push('Content is required for agent memory')
}
} else if (params.type === 'raw') {
if (!params.rawData) {
errors.push('Raw data is required for raw memory')
}
if (!params.role) {
errors.push('Role is required for agent memory')
}
if (!params.content) {
errors.push('Content is required for agent memory')
}
}
@@ -77,14 +69,9 @@ export const MemoryBlock: BlockConfig = {
const result: Record<string, any> = {
...baseResult,
id: params.id,
type: params.type,
}
if (params.type === 'agent') {
result.role = params.role
result.content = params.content
} else if (params.type === 'raw') {
result.rawData = params.rawData
type: 'agent', // Always agent type
role: params.role,
content: params.content,
}
return result
@@ -114,10 +101,8 @@ export const MemoryBlock: BlockConfig = {
inputs: {
operation: { type: 'string', required: true },
id: { type: 'string', required: true },
type: { type: 'string', required: false },
role: { type: 'string', required: false },
content: { type: 'string', required: false },
rawData: { type: 'json', required: false },
},
outputs: {
response: {
@@ -174,21 +159,6 @@ export const MemoryBlock: BlockConfig = {
value: 'delete',
},
},
{
id: 'type',
title: 'Type',
type: 'dropdown',
layout: 'full',
options: [
{ label: 'Agent', id: 'agent' },
{ label: 'Raw', id: 'raw' },
],
placeholder: 'Select memory type',
condition: {
field: 'operation',
value: 'add',
},
},
{
id: 'role',
title: 'Role',
@@ -201,12 +171,8 @@ export const MemoryBlock: BlockConfig = {
],
placeholder: 'Select agent role',
condition: {
field: 'type',
value: 'agent',
and: {
field: 'operation',
value: 'add',
},
field: 'operation',
value: 'add',
},
},
{
@@ -216,28 +182,8 @@ export const MemoryBlock: BlockConfig = {
layout: 'full',
placeholder: 'Enter message content',
condition: {
field: 'type',
value: 'agent',
and: {
field: 'operation',
value: 'add',
},
},
},
{
id: 'rawData',
title: 'Raw Data',
type: 'code',
layout: 'full',
language: 'json',
placeholder: '{"key": "value"}',
condition: {
field: 'type',
value: 'raw',
and: {
field: 'operation',
value: 'add',
},
field: 'operation',
value: 'add',
},
},
],

View File

@@ -161,7 +161,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
context: 'User query: Hello!',
userPrompt: 'User query: Hello!',
temperature: 0.7,
maxTokens: 100,
apiKey: 'test-api-key', // Add API key for non-hosted env
@@ -169,17 +169,6 @@ describe('AgentBlockHandler', () => {
mockGetProviderFromModel.mockReturnValue('openai')
const _expectedProviderRequest = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
context: 'User query: Hello!',
tools: undefined, // No tools in this basic case
temperature: 0.7,
maxTokens: 100,
apiKey: 'test-api-key',
responseFormat: undefined,
}
const expectedOutput = {
response: {
content: 'Mocked response content',
@@ -246,7 +235,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Test custom tools with different usageControl settings',
userPrompt: 'Test custom tools with different usageControl settings',
apiKey: 'test-api-key',
tools: [
{
@@ -331,7 +320,7 @@ describe('AgentBlockHandler', () => {
expect(typeof autoTool.executeFunction).toBe('function')
expect(typeof forceTool.executeFunction).toBe('function')
const _autoResult = await autoTool.executeFunction({ input: 'test input' })
await autoTool.executeFunction({ input: 'test input' })
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
expect.objectContaining({
@@ -340,7 +329,7 @@ describe('AgentBlockHandler', () => {
})
)
const _forceResult = await forceTool.executeFunction({ input: 'another test' })
await forceTool.executeFunction({ input: 'another test' })
expect(mockExecuteTool).toHaveBeenCalledWith(
'function_execute',
expect.objectContaining({
@@ -358,7 +347,7 @@ describe('AgentBlockHandler', () => {
it('should filter out tools with usageControl set to "none"', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Use the tools provided.',
userPrompt: 'Use the tools provided.',
apiKey: 'test-api-key',
tools: [
{
@@ -403,7 +392,7 @@ describe('AgentBlockHandler', () => {
it('should include usageControl property in transformed tools', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Use the tools with different usage controls.',
userPrompt: 'Use the tools with different usage controls.',
apiKey: 'test-api-key',
tools: [
{
@@ -444,7 +433,7 @@ describe('AgentBlockHandler', () => {
it('should handle custom tools with usageControl properties', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Use the custom tools.',
userPrompt: 'Use the custom tools.',
apiKey: 'test-api-key',
tools: [
{
@@ -522,24 +511,13 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
context: 'User query: Hello!',
userPrompt: 'User query: Hello!',
temperature: 0.7,
maxTokens: 100,
}
mockGetProviderFromModel.mockReturnValue('openai')
const _expectedProviderRequest = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
context: 'User query: Hello!',
tools: undefined,
temperature: 0.7,
maxTokens: 100,
apiKey: undefined, // No API key, server will add it
responseFormat: undefined,
}
await handler.execute(mockBlock, inputs, mockContext)
expect(mockFetch).toHaveBeenCalledWith(expect.any(String), expect.any(Object))
@@ -548,7 +526,7 @@ describe('AgentBlockHandler', () => {
it('should execute with standard block tools', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Analyze this data.',
userPrompt: 'Analyze this data.',
apiKey: 'test-api-key', // Add API key for non-hosted env
tools: [
{
@@ -569,17 +547,6 @@ describe('AgentBlockHandler', () => {
mockTransformBlockTool.mockReturnValue(mockToolDetails)
mockGetProviderFromModel.mockReturnValue('openai')
const _expectedProviderRequest = {
model: 'gpt-4o',
systemPrompt: undefined,
context: 'Analyze this data.',
tools: [mockToolDetails],
temperature: undefined,
maxTokens: undefined,
apiKey: 'test-api-key',
responseFormat: undefined,
}
const expectedOutput = {
response: {
content: 'Mocked response content',
@@ -604,8 +571,8 @@ describe('AgentBlockHandler', () => {
it('should execute with custom tools (schema only and with code)', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Use the custom tools.',
apiKey: 'test-api-key', // Add API key for non-hosted env
userPrompt: 'Use the custom tools.',
apiKey: 'test-api-key',
tools: [
{
type: 'custom-tool',
@@ -674,7 +641,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Test context',
userPrompt: 'Test context',
apiKey: 'test-api-key',
responseFormat:
'{"type":"object","properties":{"result":{"type":"string"},"score":{"type":"number"}}}',
@@ -715,7 +682,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Test context',
userPrompt: 'Test context',
apiKey: 'test-api-key',
responseFormat: '', // Empty string
}
@@ -736,7 +703,7 @@ describe('AgentBlockHandler', () => {
it('should throw an error for invalid JSON in responseFormat', async () => {
const inputs = {
model: 'gpt-4o',
context: 'Format this output.',
userPrompt: 'Format this output.',
apiKey: 'test-api-key',
responseFormat: '{invalid-json',
}
@@ -749,7 +716,7 @@ describe('AgentBlockHandler', () => {
it('should handle errors from the provider request', async () => {
const inputs = {
model: 'gpt-4o',
context: 'This will fail.',
userPrompt: 'This will fail.',
apiKey: 'test-api-key', // Add API key for non-hosted env
}
@@ -761,144 +728,6 @@ describe('AgentBlockHandler', () => {
)
})
// Tests for raw messages parameter
it('should execute with raw JSON messages array', async () => {
const inputs = {
model: 'gpt-4o',
messages: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Hello, how are you?' },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were sent to the provider
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(2)
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[1].role).toBe('user')
// Verify system prompt and context are not included
expect(requestBody.systemPrompt).toBeUndefined()
expect(requestBody.context).toBeUndefined()
})
it('should parse and use messages with single quotes', async () => {
const inputs = {
model: 'gpt-4o',
// Single-quoted JSON format
messages: `[{'role': 'system', 'content': 'You are a helpful assistant.'}, {'role': 'user', 'content': 'Hello, how are you?'}]`,
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were parsed and sent to the provider
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(2)
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('You are a helpful assistant.')
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hello, how are you?')
})
it('should prioritize messages over systemPrompt and context when both are provided', async () => {
const inputs = {
model: 'gpt-4o',
// Valid messages array should take priority
messages: [
{ role: 'system', content: 'You are an AI assistant.' },
{ role: 'user', content: 'What is the capital of France?' },
],
// These should be ignored since messages are valid
systemPrompt: 'You are a helpful assistant.',
context: 'Tell me about the weather.',
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were sent to the provider
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(2)
expect(requestBody.messages[0].content).toBe('You are an AI assistant.')
expect(requestBody.messages[1].content).toBe('What is the capital of France?')
// Verify system prompt and context are not included
expect(requestBody.systemPrompt).toBeUndefined()
expect(requestBody.context).toBeUndefined()
})
it('should fall back to systemPrompt and context if messages array is invalid', async () => {
const inputs = {
model: 'gpt-4o',
// Invalid messages array (missing required 'role' field)
messages: [
{ content: 'This message is missing the role field' },
{ role: 'user', content: 'Hello' },
],
// These should be used as fallback
systemPrompt: 'You are a helpful assistant.',
context: 'Help the user with their query.',
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify fallback to systemPrompt and context
expect(requestBody.messages).toBeUndefined()
expect(requestBody.systemPrompt).toBe('You are a helpful assistant.')
expect(requestBody.context).toBe('Help the user with their query.')
})
it('should handle messages with mixed quote styles', async () => {
const inputs = {
model: 'gpt-4o',
// Mixed quote styles as shown in the user's example
messages: `[{'role': 'system', "content": "Only answer questions about the United States. If someone asks about something else, just say you can't help with that."}, {"role": "user", "content": "What's the capital of Bosnia and Herzegovina?"}]`,
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were parsed and sent to the provider
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(2)
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe(
"Only answer questions about the United States. If someone asks about something else, just say you can't help with that."
)
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe("What's the capital of Bosnia and Herzegovina?")
})
it('should handle streaming responses with text/event-stream content type', async () => {
const mockStreamBody = {
getReader: vi.fn().mockReturnValue({
@@ -922,7 +751,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Stream this response.',
userPrompt: 'Stream this response.',
apiKey: 'test-api-key',
stream: true,
}
@@ -989,7 +818,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Stream this response with execution data.',
userPrompt: 'Stream this response with execution data.',
apiKey: 'test-api-key',
stream: true,
}
@@ -1012,7 +841,7 @@ describe('AgentBlockHandler', () => {
})
it('should handle combined stream+execution responses', async () => {
const _mockStreamObj = new ReadableStream({
new ReadableStream({
start(controller) {
controller.close()
},
@@ -1048,7 +877,7 @@ describe('AgentBlockHandler', () => {
const inputs = {
model: 'gpt-4o',
context: 'Return a combined response.',
userPrompt: 'Return a combined response.',
apiKey: 'test-api-key',
stream: true,
}
@@ -1067,5 +896,282 @@ describe('AgentBlockHandler', () => {
)
expect((result as StreamingExecution).execution.output.response.model).toBe('gpt-4o')
})
it('should process memories in advanced mode with system prompt and user prompt', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'What did we discuss before?',
memories: [
{ role: 'user', content: 'Hello, my name is John.' },
{ role: 'assistant', content: 'Hello John! Nice to meet you.' },
{ role: 'user', content: 'I like programming.' },
{ role: 'assistant', content: "That's great! What programming languages do you enjoy?" },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(6) // system + 4 memories + user prompt
// Check system prompt is first
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('You are a helpful assistant.')
// Check memories are in the middle
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hello, my name is John.')
expect(requestBody.messages[2].role).toBe('assistant')
expect(requestBody.messages[2].content).toBe('Hello John! Nice to meet you.')
// Check user prompt is last
expect(requestBody.messages[5].role).toBe('user')
expect(requestBody.messages[5].content).toBe('What did we discuss before?')
// Verify system prompt and context are not included separately
expect(requestBody.systemPrompt).toBeUndefined()
expect(requestBody.userPrompt).toBeUndefined()
})
it('should handle memory block output format', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'Continue our conversation.',
memories: {
response: {
memories: [
{
key: 'conversation-1',
type: 'agent',
data: [
{ role: 'user', content: 'Hi there!' },
{ role: 'assistant', content: 'Hello! How can I help you?' },
],
},
],
},
},
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(4) // system + 2 memories + user prompt
// Check system prompt is first
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('You are a helpful assistant.')
// Check memories from memory block
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hi there!')
expect(requestBody.messages[2].role).toBe('assistant')
expect(requestBody.messages[2].content).toBe('Hello! How can I help you?')
// Check user prompt is last
expect(requestBody.messages[3].role).toBe('user')
expect(requestBody.messages[3].content).toBe('Continue our conversation.')
})
it('should not duplicate system prompt if it exists in memories', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'What should I do?',
memories: [
{ role: 'system', content: 'You are a helpful assistant.' },
{ role: 'user', content: 'Hello!' },
{ role: 'assistant', content: 'Hi there!' },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(4) // existing system + 2 memories + user prompt
// Check only one system message exists
const systemMessages = requestBody.messages.filter((msg: any) => msg.role === 'system')
expect(systemMessages.length).toBe(1)
expect(systemMessages[0].content).toBe('You are a helpful assistant.')
})
it('should prioritize explicit systemPrompt over system messages in memories', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'What should I do?',
memories: [
{ role: 'system', content: 'Old system message from memories.' },
{ role: 'user', content: 'Hello!' },
{ role: 'assistant', content: 'Hi there!' },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(4) // explicit system + 2 non-system memories + user prompt
// Check only one system message exists and it's the explicit one
const systemMessages = requestBody.messages.filter((msg: any) => msg.role === 'system')
expect(systemMessages.length).toBe(1)
expect(systemMessages[0].content).toBe('You are a helpful assistant.')
// Verify the explicit system prompt is first
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('You are a helpful assistant.')
// Verify conversation order is preserved
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hello!')
expect(requestBody.messages[2].role).toBe('assistant')
expect(requestBody.messages[2].content).toBe('Hi there!')
expect(requestBody.messages[3].role).toBe('user')
expect(requestBody.messages[3].content).toBe('What should I do?')
})
it('should handle multiple system messages in memories with explicit systemPrompt', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: 'Continue our conversation.',
memories: [
{ role: 'system', content: 'First system message.' },
{ role: 'user', content: 'Hello!' },
{ role: 'system', content: 'Second system message.' },
{ role: 'assistant', content: 'Hi there!' },
{ role: 'system', content: 'Third system message.' },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(4) // explicit system + 2 non-system memories + user prompt
// Check only one system message exists and message order is preserved
const systemMessages = requestBody.messages.filter((msg: any) => msg.role === 'system')
expect(systemMessages.length).toBe(1)
expect(systemMessages[0].content).toBe('You are a helpful assistant.')
// Verify conversation flow is preserved
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('You are a helpful assistant.')
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hello!')
expect(requestBody.messages[2].role).toBe('assistant')
expect(requestBody.messages[2].content).toBe('Hi there!')
expect(requestBody.messages[3].role).toBe('user')
expect(requestBody.messages[3].content).toBe('Continue our conversation.')
})
// Regression test: when the caller supplies NO explicit systemPrompt, system
// messages found inside `memories` must pass through to the provider request
// untouched — no deduplication, no reordering. (Contrast with the explicit
// systemPrompt case, where memory-sourced system messages are replaced.)
it('should preserve multiple system messages when no explicit systemPrompt is provided', async () => {
const inputs = {
model: 'gpt-4o',
userPrompt: 'What should I do?',
// Two system messages deliberately interleaved with user/assistant turns.
memories: [
{ role: 'system', content: 'First system message.' },
{ role: 'user', content: 'Hello!' },
{ role: 'system', content: 'Second system message.' },
{ role: 'assistant', content: 'Hi there!' },
],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
// Inspect the raw body of the single provider HTTP call made by the handler.
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify messages were built correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(5) // 2 system + 2 non-system memories + user prompt
// Check that multiple system messages are preserved when no explicit systemPrompt
const systemMessages = requestBody.messages.filter((msg: any) => msg.role === 'system')
expect(systemMessages.length).toBe(2)
expect(systemMessages[0].content).toBe('First system message.')
expect(systemMessages[1].content).toBe('Second system message.')
// Verify original order is preserved
expect(requestBody.messages[0].role).toBe('system')
expect(requestBody.messages[0].content).toBe('First system message.')
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('Hello!')
expect(requestBody.messages[2].role).toBe('system')
expect(requestBody.messages[2].content).toBe('Second system message.')
expect(requestBody.messages[3].role).toBe('assistant')
expect(requestBody.messages[3].content).toBe('Hi there!')
// The userPrompt is always appended last.
expect(requestBody.messages[4].role).toBe('user')
expect(requestBody.messages[4].content).toBe('What should I do?')
})
// When userPrompt arrives as a structured object (e.g. from an upstream block),
// only its `input` field becomes the user message content; auxiliary fields
// such as conversationId must not leak into the provider message.
it('should handle user prompt as object with input field', async () => {
const inputs = {
model: 'gpt-4o',
systemPrompt: 'You are a helpful assistant.',
userPrompt: {
input: 'What is the weather like?',
conversationId: 'abc-123', // extra metadata that should be stripped
},
memories: [],
apiKey: 'test-api-key',
}
mockGetProviderFromModel.mockReturnValue('openai')
await handler.execute(mockBlock, inputs, mockContext)
// Inspect the raw body of the provider HTTP call made by the handler.
const fetchCall = mockFetch.mock.calls[0]
const requestBody = JSON.parse(fetchCall[1].body)
// Verify user prompt content was extracted correctly
expect(requestBody.messages).toBeDefined()
expect(requestBody.messages.length).toBe(2) // system + user prompt
expect(requestBody.messages[1].role).toBe('user')
expect(requestBody.messages[1].content).toBe('What is the weather like?')
expect(requestBody.messages[1]).not.toHaveProperty('conversationId')
})
})
})

View File

@@ -188,46 +188,86 @@ export class AgentBlockHandler implements BlockHandler {
)
}
// Parse messages if they're in string format
let parsedMessages = inputs.messages
if (typeof inputs.messages === 'string' && inputs.messages.trim()) {
try {
// Fast path: try standard JSON.parse first
try {
parsedMessages = JSON.parse(inputs.messages)
logger.info('Successfully parsed messages from JSON format')
} catch (_jsonError) {
// Fast direct approach for single-quoted JSON
// Replace single quotes with double quotes, but keep single quotes inside double quotes
// This optimized approach handles the most common cases in one pass
const preprocessed = inputs.messages
// Ensure we have valid JSON by replacing all single quotes with double quotes,
// except those inside existing double quotes
.replace(/(['"])(.*?)\1/g, (match, quote, content) => {
if (quote === '"') return match // Keep existing double quotes intact
return `"${content}"` // Replace single quotes with double quotes
})
// Initialize parsedMessages - will be built from memories/prompts if provided
let parsedMessages: any[] | undefined
try {
parsedMessages = JSON.parse(preprocessed)
logger.info('Successfully parsed messages after single-quote preprocessing')
} catch (_preprocessError) {
// Ultimate fallback: simply replace all single quotes
try {
parsedMessages = JSON.parse(inputs.messages.replace(/'/g, '"'))
logger.info('Successfully parsed messages using direct quote replacement')
} catch (finalError) {
logger.error('All parsing attempts failed', {
original: inputs.messages,
error: finalError,
})
// Keep original value
}
// Check if we're in advanced mode with the memories field
if (inputs.memories || (inputs.systemPrompt && inputs.userPrompt)) {
const messages: any[] = []
if (inputs.memories) {
const memories = inputs.memories
const memoryMessages = processMemories(memories, logger)
messages.push(...memoryMessages)
}
// Handle system prompt with clear precedence rules
if (inputs.systemPrompt) {
// Check for existing system messages in memories
const systemMessages = messages.filter((msg) => msg.role === 'system')
if (systemMessages.length > 1) {
logger.warn(
`Found ${systemMessages.length} system messages in memories. Explicit systemPrompt will take precedence.`
)
} else if (systemMessages.length === 1) {
logger.info(
'Found system message in memories. Explicit systemPrompt will take precedence.'
)
}
// Remove any existing system messages and add the explicit one at the beginning
messages.splice(0, 0, {
role: 'system',
content: inputs.systemPrompt,
})
// Remove any other system messages that came from memories
for (let i = messages.length - 1; i >= 1; i--) {
if (messages[i].role === 'system') {
messages.splice(i, 1)
}
}
} catch (error) {
logger.error('Failed to parse messages from string:', { error })
// Keep original value if all parsing fails
logger.info(
'Added explicit system prompt as first message, removed any system messages from memories'
)
} else {
// No explicit system prompt provided, check for multiple system messages in memories
const systemMessages = messages.filter((msg) => msg.role === 'system')
if (systemMessages.length > 1) {
logger.warn(
`Found ${systemMessages.length} system messages in memories with no explicit systemPrompt. Consider providing an explicit systemPrompt for consistent behavior.`
)
} else if (systemMessages.length === 1) {
logger.info('Using system message from memories')
}
}
if (inputs.userPrompt) {
let userContent = inputs.userPrompt
if (typeof userContent === 'object' && userContent.input) {
userContent = userContent.input
} else if (typeof userContent === 'object') {
userContent = JSON.stringify(userContent)
}
messages.push({
role: 'user',
content: userContent,
})
logger.info('Added user prompt to messages', { contentType: typeof userContent })
}
if (messages.length > 0) {
parsedMessages = messages
logger.info('Built messages from advanced mode', {
messageCount: messages.length,
firstMessage: messages[0],
lastMessage: messages[messages.length - 1],
})
}
}
@@ -262,11 +302,13 @@ export class AgentBlockHandler implements BlockHandler {
? { messages: parsedMessages }
: {
systemPrompt: inputs.systemPrompt,
context: Array.isArray(inputs.context)
? JSON.stringify(inputs.context, null, 2)
: typeof inputs.context === 'string'
? inputs.context
: JSON.stringify(inputs.context, null, 2),
context: inputs.userPrompt
? Array.isArray(inputs.userPrompt)
? JSON.stringify(inputs.userPrompt, null, 2)
: typeof inputs.userPrompt === 'string'
? inputs.userPrompt
: JSON.stringify(inputs.userPrompt, null, 2)
: undefined,
}),
tools: formattedTools.length > 0 ? formattedTools : undefined,
temperature: inputs.temperature,
@@ -282,7 +324,8 @@ export class AgentBlockHandler implements BlockHandler {
hasMessages: Array.isArray(parsedMessages) && parsedMessages.length > 0,
hasSystemPrompt:
!(Array.isArray(parsedMessages) && parsedMessages.length > 0) && !!inputs.systemPrompt,
hasContext: !(Array.isArray(parsedMessages) && parsedMessages.length > 0) && !!inputs.context,
hasContext:
!(Array.isArray(parsedMessages) && parsedMessages.length > 0) && !!inputs.userPrompt,
hasTools: !!providerRequest.tools,
hasApiKey: !!providerRequest.apiKey,
workflowId: providerRequest.workflowId,
@@ -544,3 +587,54 @@ export class AgentBlockHandler implements BlockHandler {
/**
 * Strips the leading `custom_` marker from a tool name, if present.
 * Names without the marker are returned unchanged.
 */
export function stripCustomToolPrefix(name: string) {
  const prefix = 'custom_'
  return name.startsWith(prefix) ? name.slice(prefix.length) : name
}
/**
 * Normalizes memory input into a flat list of chat messages ({ role, content }).
 *
 * Accepts three container shapes:
 *  - Memory block output:  { response: { memories: [...] } }
 *  - Direct memory output: { memories: [...] }
 *  - A plain array of messages and/or memory records
 *
 * Each array entry may itself be a memory record holding a `data` array of
 * messages, or a bare message. Entries lacking a truthy role/content are
 * skipped; an unrecognized top-level shape logs a warning and yields [].
 */
function processMemories(memories: any, logger: any): any[] {
  if (!memories) {
    return []
  }

  // Resolve which of the supported container shapes we were handed.
  let entries: any[]
  if (Array.isArray(memories?.response?.memories)) {
    // Memory block output format: { response: { memories: [...] } }
    entries = memories.response.memories
  } else if (Array.isArray(memories?.memories)) {
    // Direct memory output format: { memories: [...] }
    entries = memories.memories
  } else if (Array.isArray(memories)) {
    // Already a plain array of messages/records: [{ role, content }, ...]
    entries = memories
  } else {
    logger.warn('Unexpected memories format', { memories })
    return []
  }

  const result: any[] = []
  for (const entry of entries) {
    if (entry.data && Array.isArray(entry.data)) {
      // Memory record shape: { key, type, data: [{ role, content }, ...] }
      for (const message of entry.data) {
        if (message.role && message.content) {
          result.push({ role: message.role, content: message.content })
        }
      }
    } else if (entry.role && entry.content) {
      // Bare message shape: { role, content }
      result.push({ role: entry.role, content: entry.content })
    }
  }
  return result
}

View File

@@ -1,5 +1,3 @@
import { env } from '../env'
/**
* Returns the base URL of the application, respecting environment variables for deployment environments
* @returns The base URL string (e.g., 'http://localhost:3000' or 'https://example.com')
@@ -9,13 +7,13 @@ export function getBaseUrl(): string {
return window.location.origin
}
const baseUrl = env.NEXT_PUBLIC_APP_URL
const baseUrl = process.env.NEXT_PUBLIC_APP_URL
if (baseUrl) {
if (baseUrl.startsWith('http://') || baseUrl.startsWith('https://')) {
return baseUrl
}
const isProd = env.NODE_ENV === 'production'
const isProd = process.env.NODE_ENV === 'production'
const protocol = isProd ? 'https://' : 'http://'
return `${protocol}${baseUrl}`
}
@@ -32,7 +30,7 @@ export function getBaseDomain(): string {
const url = new URL(getBaseUrl())
return url.host // host includes port if specified
} catch (_e) {
const isProd = env.NODE_ENV === 'production'
const isProd = process.env.NODE_ENV === 'production'
return isProd ? 'simstudio.ai' : 'localhost:3000'
}
}

View File

@@ -1,8 +1,18 @@
import { beforeEach, describe, expect, it } from 'vitest'
import { beforeEach, describe, expect, it, vi } from 'vitest'
import { useWorkflowRegistry } from '../registry/store'
import { useSubBlockStore } from '../subblock/store'
import { useWorkflowStore } from './store'
describe('workflow store', () => {
beforeEach(() => {
const localStorageMock = {
getItem: vi.fn(),
setItem: vi.fn(),
removeItem: vi.fn(),
clear: vi.fn(),
}
global.localStorage = localStorageMock as any
useWorkflowStore.setState({
blocks: {},
edges: [],
@@ -246,4 +256,135 @@ describe('workflow store', () => {
expect(state.history.past.length).toBe(initialHistoryLength + 2)
})
})
// Tests for the basic/advanced mode toggle on agent blocks. Contract exercised:
//  - toggleBlockAdvancedMode flips `advancedMode` on the block (undefined -> true -> false)
//  - systemPrompt/userPrompt subblock values survive toggles in both directions
//  - `memories` (an advanced-mode-only field) is cleared when leaving advanced mode
//  - toggling is a no-op-safe operation for missing values and unknown block IDs
describe('mode switching', () => {
it('should toggle advanced mode on a block', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Initially should be in basic mode (advancedMode: false or undefined)
let state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBeUndefined()
// Toggle to advanced mode
toggleBlockAdvancedMode('agent1')
state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(true)
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(false)
})
it('should preserve systemPrompt and userPrompt when switching modes', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
const { setState: setSubBlockState } = useSubBlockStore
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Set initial values in basic mode
setSubBlockState({
workflowValues: {
'test-workflow': {
agent1: {
systemPrompt: 'You are a helpful assistant',
userPrompt: 'Hello, how are you?',
},
},
},
})
// Toggle to advanced mode
toggleBlockAdvancedMode('agent1')
// Check that prompts are preserved in advanced mode
let subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
)
expect(subBlockState.workflowValues['test-workflow'].agent1.userPrompt).toBe(
'Hello, how are you?'
)
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
// Check that prompts are still preserved after a round trip
subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
)
expect(subBlockState.workflowValues['test-workflow'].agent1.userPrompt).toBe(
'Hello, how are you?'
)
})
it('should clear memories when switching from advanced to basic mode', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
const { setState: setSubBlockState } = useSubBlockStore
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block in advanced mode
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// First toggle to advanced mode
toggleBlockAdvancedMode('agent1')
// Set values including memories (memories only exist in advanced mode)
setSubBlockState({
workflowValues: {
'test-workflow': {
agent1: {
systemPrompt: 'You are a helpful assistant',
userPrompt: 'What did we discuss?',
memories: [
{ role: 'user', content: 'My name is John' },
{ role: 'assistant', content: 'Nice to meet you, John!' },
],
},
},
},
})
// Toggle back to basic mode
toggleBlockAdvancedMode('agent1')
// Check that prompts are preserved but memories are cleared
const subBlockState = useSubBlockStore.getState()
expect(subBlockState.workflowValues['test-workflow'].agent1.systemPrompt).toBe(
'You are a helpful assistant'
)
expect(subBlockState.workflowValues['test-workflow'].agent1.userPrompt).toBe(
'What did we discuss?'
)
// Cleared values are set to null, not deleted
expect(subBlockState.workflowValues['test-workflow'].agent1.memories).toBeNull()
})
it('should handle mode switching when no subblock values exist', () => {
const { addBlock, toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Set up a mock active workflow
useWorkflowRegistry.setState({ activeWorkflowId: 'test-workflow' })
// Add an agent block
addBlock('agent1', 'agent', 'Test Agent', { x: 0, y: 0 })
// Toggle modes without any subblock values set — must not throw
expect(useWorkflowStore.getState().blocks.agent1?.advancedMode).toBeUndefined()
expect(() => toggleBlockAdvancedMode('agent1')).not.toThrow()
// Verify the mode changed
const state = useWorkflowStore.getState()
expect(state.blocks.agent1?.advancedMode).toBe(true)
})
it('should not throw when toggling non-existent block', () => {
const { toggleBlockAdvancedMode } = useWorkflowStore.getState()
// Try to toggle a block that doesn't exist
expect(() => toggleBlockAdvancedMode('non-existent')).not.toThrow()
})
})
})

View File

@@ -920,12 +920,13 @@ export const useWorkflowStore = create<WorkflowStoreWithHistory>()(
const updatedValues = { ...blockValues }
if (!block.advancedMode) {
// Switching TO advanced mode, clear system prompt and context (basic mode fields)
updatedValues.systemPrompt = null
updatedValues.context = null
// Switching TO advanced mode
// Preserve systemPrompt and userPrompt, memories starts empty
// No need to clear anything since advanced mode has all fields
} else {
// Switching TO basic mode, clear messages (advanced mode field)
updatedValues.messages = null
// Switching TO basic mode
// Preserve systemPrompt and userPrompt, but clear memories
updatedValues.memories = null
}
// Update subblock store with the cleared values

View File

@@ -4,8 +4,7 @@ import type { MemoryResponse } from './types'
export const memoryAddTool: ToolConfig<any, MemoryResponse> = {
id: 'memory_add',
name: 'Add Memory',
description:
'Add a new memory to the database or append to existing memory with the same ID. When appending to existing memory, the memory types must match.',
description: 'Add a new memory to the database or append to existing memory with the same ID.',
version: '1.0.0',
params: {
id: {
@@ -14,26 +13,16 @@ export const memoryAddTool: ToolConfig<any, MemoryResponse> = {
description:
'Identifier for the memory. If a memory with this ID already exists, the new data will be appended to it.',
},
type: {
type: 'string',
required: true,
description: 'Type of memory (agent or raw)',
},
role: {
type: 'string',
required: false,
required: true,
description: 'Role for agent memory (user, assistant, or system)',
},
content: {
type: 'string',
required: false,
required: true,
description: 'Content for agent memory',
},
rawData: {
type: 'json',
required: false,
description: 'Raw data to store (JSON format)',
},
},
request: {
url: '/api/memory',
@@ -62,66 +51,28 @@ export const memoryAddTool: ToolConfig<any, MemoryResponse> = {
const body: Record<string, any> = {
key: params.id,
type: params.type,
type: 'agent', // Always agent type
workflowId,
}
// Set data based on type
if (params.type === 'agent') {
if (!params.role || !params.content) {
return {
_errorResponse: {
status: 400,
data: {
success: false,
error: {
message: 'Role and content are required for agent memory',
},
// Validate and set data
if (!params.role || !params.content) {
return {
_errorResponse: {
status: 400,
data: {
success: false,
error: {
message: 'Role and content are required for agent memory',
},
},
}
}
body.data = {
role: params.role,
content: params.content,
}
} else if (params.type === 'raw') {
if (!params.rawData) {
return {
_errorResponse: {
status: 400,
data: {
success: false,
error: {
message: 'Raw data is required for raw memory',
},
},
},
}
},
}
}
let parsedRawData
if (typeof params.rawData === 'string') {
try {
parsedRawData = JSON.parse(params.rawData)
} catch (_e) {
return {
_errorResponse: {
status: 400,
data: {
success: false,
error: {
message: 'Invalid JSON for raw data',
},
},
},
}
}
} else {
parsedRawData = params.rawData
}
body.data = parsedRawData
body.data = {
role: params.role,
content: params.content,
}
return body
@@ -135,15 +86,8 @@ export const memoryAddTool: ToolConfig<any, MemoryResponse> = {
const data = result.data || result
// Extract the memories from the response based on memory type
let memories
if (data.type === 'agent') {
// For agent memories, return the full array of message objects
memories = Array.isArray(data.data) ? data.data : [data.data]
} else {
// For raw memories, return the raw data object
memories = data.data
}
// For agent memories, return the full array of message objects
const memories = Array.isArray(data.data) ? data.data : [data.data]
return {
success: true,

View File

@@ -12,15 +12,11 @@ export interface AgentMemoryData {
content: string
}
// Arbitrary JSON payload for a memory record; shape is caller-defined.
// NOTE(review): surrounding diff context suggests this type is being removed
// along with the 'raw' memory type — confirm remaining references before reuse.
export interface RawMemoryData {
[key: string]: any
}
export interface MemoryRecord {
id: string
key: string
type: 'agent' | 'raw'
data: AgentMemoryData[] | RawMemoryData
type: 'agent'
data: AgentMemoryData[]
createdAt: string
updatedAt: string
workflowId?: string