Mirror of https://github.com/simstudioai/sim.git (synced 2026-01-08 22:48:14 -05:00)
Created a new providers/ directory for model providers, changed tool names to use '_' instead of '.', and added support for function/tool calling in the agent block. Works for OpenAI with the Firecrawl tool; still needs to be implemented for the other providers.
This commit is contained in:
@@ -1,10 +1,44 @@
import { NextResponse } from 'next/server'
import { getTool } from '@/tools'
import { anthropicProvider } from '@/providers/anthropic'
import { openaiProvider } from '@/providers/openai'
import { ProviderConfig } from '@/providers/types'

const providers: Record<string, ProviderConfig> = {
  'anthropic/chat': anthropicProvider,
  'openai/chat': openaiProvider
}

export async function POST(request: Request) {
  try {
    const { toolId, params } = await request.json()

    // Check if this is a provider chat request
    const provider = providers[toolId]
    if (provider) {
      const { apiKey, ...restParams } = params
      if (!apiKey) {
        throw new Error('API key is required')
      }

      const response = await fetch(provider.baseUrl, {
        method: 'POST',
        headers: provider.headers(apiKey),
        body: JSON.stringify(restParams)
      })

      if (!response.ok) {
        const error = await response.json()
        throw new Error(error.error?.message || `${toolId} API error`)
      }

      return NextResponse.json({
        success: true,
        output: await response.json()
      })
    }

    // Handle regular tool requests
    const tool = getTool(toolId)
    if (!tool) {
      throw new Error(`Tool not found: ${toolId}`)
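Note (illustrative, not part of the diff): a minimal sketch of how a caller might hit this proxy route for a provider chat request. The /api/proxy path appears in providers/service.ts below; the message payload shown here is an assumption.

const res = await fetch('/api/proxy', {
  method: 'POST',
  headers: { 'Content-Type': 'application/json' },
  body: JSON.stringify({
    toolId: 'openai/chat',                      // matches the providers map above, not a regular tool ID
    params: {
      apiKey: process.env.OPENAI_API_KEY,       // forwarded to the provider via provider.headers(apiKey)
      model: 'gpt-4o',
      messages: [{ role: 'user', content: 'Hello' }]  // assumed OpenAI-style body
    }
  })
})
const { success, output } = await res.json()    // { success: true, output: <raw provider response> }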
@@ -1,34 +1,49 @@
import { AgentIcon } from '@/components/icons'
import { BlockConfig } from '../types'
import { ChatResponse } from '@/tools/openai/chat'
import { AgentIcon } from '@/components/icons'
import { MODEL_TOOLS, ModelType } from '../consts'
import { ToolResponse } from '@/tools/types'

export const AgentBlock: BlockConfig<ChatResponse> = {
interface AgentResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: {
      prompt?: number
      completion?: number
      total?: number
    }
    toolCalls?: {
      list: Array<{
        name: string
        arguments: Record<string, any>
      }>
      count: number
    }
  }
}

export const AgentBlock: BlockConfig<AgentResponse> = {
  type: 'agent',
  toolbar: {
    title: 'Agent',
    description: 'Use any LLM',
    description: 'Add an AI agent with tool access',
    bgColor: '#7F2FFF',
    icon: AgentIcon,
    category: 'blocks',
  },
  tools: {
    access: ['openai.chat', 'anthropic.chat', 'google.chat', 'xai.chat', 'deepseek.chat', 'deepseek.reasoner'],
    access: ['openai_chat', 'anthropic_chat', 'google_chat', 'xai_chat', 'deepseek_chat', 'deepseek_reasoner'],
    config: {
      tool: (params: Record<string, any>) => {
        const model = params.model || 'gpt-4o'

        if (!model) {
          throw new Error('No model selected')
        }

        const tool = MODEL_TOOLS[model as ModelType]

        if (!tool) {
          throw new Error(`Invalid model selected: ${model}`)
        }

        return tool
        return tool
      }
    }
  },
@@ -36,6 +51,7 @@ export const AgentBlock: BlockConfig<ChatResponse> = {
  inputs: {
    systemPrompt: { type: 'string', required: true },
    context: { type: 'string', required: false },
    model: { type: 'string', required: true },
    apiKey: { type: 'string', required: true },
    responseFormat: { type: 'json', required: false },
    temperature: { type: 'number', required: false },
@@ -47,7 +63,7 @@ export const AgentBlock: BlockConfig<ChatResponse> = {
      content: 'string',
      model: 'string',
      tokens: 'any',
      reasoning_tokens: 'any'
      toolCalls: 'any'
    }
  }
},
@@ -57,14 +73,14 @@ export const AgentBlock: BlockConfig<ChatResponse> = {
      title: 'System Prompt',
      type: 'long-input',
      layout: 'full',
      placeholder: 'Enter prompt'
      placeholder: 'Enter system prompt...'
    },
    {
      id: 'context',
      title: 'Context',
      type: 'short-input',
      layout: 'full',
      placeholder: 'Enter text'
      placeholder: 'Enter context or user message...'
    },
    {
      id: 'model',
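Note (illustrative, not part of the diff; assumes tools.config is defined as above): the block's tool selector resolves a model name to a tool ID via MODEL_TOOLS.

const toolId = AgentBlock.tools.config?.tool({ model: 'claude-3-5-sonnet-20241022' })
// -> 'anthropic_chat' (from MODEL_TOOLS in blocks/consts.ts)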
@@ -12,7 +12,7 @@ export const ApiBlock: BlockConfig<RequestResponse> = {
    category: 'blocks',
  },
  tools: {
    access: ['http.request']
    access: ['http_request']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const CrewAIVisionBlock: BlockConfig<VisionResponse> = {
    category: 'tools'
  },
  tools: {
    access: ['crewai.vision']
    access: ['crewai_vision']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const FirecrawlScrapeBlock: BlockConfig<ScrapeResponse> = {
    category: 'tools'
  },
  tools: {
    access: ['firecrawl.scrape']
    access: ['firecrawl_scrape']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const FunctionBlock: BlockConfig<CodeExecutionOutput> = {
    category: 'blocks',
  },
  tools: {
    access: ['function.execute']
    access: ['function_execute']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const GitHubBlock: BlockConfig<RepoInfoResponse> = {
    category: 'tools',
  },
  tools: {
    access: ['github.repoinfo']
    access: ['github_repoinfo']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const JinaBlock: BlockConfig<ReadUrlResponse> = {
    category: 'tools',
  },
  tools: {
    access: ['jina.readurl']
    access: ['jina_readurl']
  },
  workflow: {
    inputs: {

@@ -12,7 +12,7 @@ export const SlackMessageBlock: BlockConfig<SlackMessageResponse> = {
    category: 'tools'
  },
  tools: {
    access: ['slack.message']
    access: ['slack_message']
  },
  workflow: {
    inputs: {

@@ -22,7 +22,7 @@ export const TranslateBlock: BlockConfig<ChatResponse> = {
    category: 'tools',
  },
  tools: {
    access: ['openai.chat', 'anthropic.chat', 'google.chat'],
    access: ['openai_chat', 'anthropic_chat', 'google_chat'],
    config: {
      tool: (params: Record<string, any>) => {
        const model = params.model || 'gpt-4o'
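Note (illustrative, not part of the diff): after the rename, a block's access list lines up directly with the keys of the tools registry, so a lookup like this works with no dot-to-underscore translation.

import { getTool } from '@/tools'

const toolConfig = getTool(ApiBlock.tools.access[0])  // 'http_request' -> the httpRequest ToolConfig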
@@ -1,12 +1,12 @@
export const MODEL_TOOLS = {
  'gpt-4o': 'openai.chat',
  'o1': 'openai.chat',
  'o1-mini': 'openai.chat',
  'deepseek-v3': 'deepseek.chat',
  'deepseek-r1': 'deepseek.reasoner',
  'claude-3-5-sonnet-20241022': 'anthropic.chat',
  'gemini-pro': 'google.chat',
  'grok-2-latest': 'xai.chat'
  'gpt-4o': 'openai_chat',
  'o1': 'openai_chat',
  'o1-mini': 'openai_chat',
  'deepseek-v3': 'deepseek_chat',
  'deepseek-r1': 'deepseek_reasoner',
  'claude-3-5-sonnet-20241022': 'anthropic_chat',
  'gemini-pro': 'google_chat',
  'grok-2-latest': 'xai_chat'
} as const

export type ModelType = keyof typeof MODEL_TOOLS
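Note (illustrative, not part of the diff): because the map is declared `as const`, ModelType and the looked-up tool ID are both narrowly typed.

const model: ModelType = 'deepseek-r1'
const toolId = MODEL_TOOLS[model]  // type and value: 'deepseek_reasoner'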
@@ -1,17 +1,30 @@
import type { SVGProps } from 'react'
import type { JSX } from 'react'
import { ToolResponse } from '@/tools/types'
import { ExtractToolOutput, ToolOutputToValueType } from './utils'

// Tool output type utilities
export type ExtractToolOutput<T> = T extends ToolResponse
  ? T['output']
  : never

export type ToolOutputToValueType<T> = T extends Record<string, any>
  ? {
      [K in keyof T]: T[K] extends string ? 'string'
        : T[K] extends number ? 'number'
        : T[K] extends boolean ? 'boolean'
        : T[K] extends object ? 'json'
        : 'any'
    }
  : never

export type BlockIcon = (props: SVGProps<SVGSVGElement>) => JSX.Element
export type BlockCategory = 'blocks' | 'tools'

export type PrimitiveValueType = 'string' | 'number' | 'json' | 'boolean' | 'any'
export type ValueType = PrimitiveValueType | Record<string, PrimitiveValueType>
export type PrimitiveValueType = 'string' | 'number' | 'boolean' | 'json' | 'any'

export type BlockOutput =
  | PrimitiveValueType
  | { [key: string]: BlockOutput }
  | PrimitiveValueType
  | { [key: string]: PrimitiveValueType | Record<string, any> }

export type ParamType = 'string' | 'number' | 'boolean' | 'json'

@@ -63,4 +76,15 @@ export interface BlockConfig<T extends ToolResponse = ToolResponse> {
      }
    }
  }
}

export interface OutputConfig {
  type: BlockOutput
  dependsOn?: {
    subBlockId: string
    condition: {
      whenEmpty: BlockOutput
      whenFilled: BlockOutput
    }
  }
}
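Note (illustrative, not part of the diff): how the two type utilities compose on a hypothetical response shape.

interface ExampleResponse extends ToolResponse {
  output: { content: string; count: number; meta: { ok: boolean } }
}

type ExampleValueTypes = ToolOutputToValueType<ExtractToolOutput<ExampleResponse>>
// -> { content: 'string'; count: 'number'; meta: 'json' }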
@@ -1,27 +1,11 @@
import { BlockState, SubBlockState } from '@/stores/workflow/types'
import { BlockOutput, OutputConfig, PrimitiveValueType } from '@/blocks/types'
import { ToolResponse } from '@/tools/types'
import { BlockOutput, OutputConfig } from '@/blocks/types'

interface CodeLine {
  id: string
  content: string
}

// Tool output type utilities
export type ExtractToolOutput<T> = T extends ToolResponse
  ? T['output']
  : never

export type ToolOutputToValueType<T> = T extends Record<string, any>
  ? {
      [K in keyof T]: T[K] extends string ? 'string'
        : T[K] extends number ? 'number'
        : T[K] extends boolean ? 'boolean'
        : T[K] extends object ? 'json'
        : 'any'
    }
  : never

function isEmptyValue(value: SubBlockState['value']): boolean {
  if (value === null || value === undefined) return true
  if (typeof value === 'string') return value.trim() === ''
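Note (hypothetical sketch, not in the diff): OutputConfig.dependsOn is presumably resolved against a sub-block value with isEmptyValue, roughly like this.

function resolveOutputType(config: OutputConfig, value: SubBlockState['value']): BlockOutput {
  if (!config.dependsOn) return config.type
  return isEmptyValue(value)
    ? config.dependsOn.condition.whenEmpty
    : config.dependsOn.condition.whenFilled
}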
@@ -17,7 +17,10 @@ import {
  ExecutionResult,
  BlockLog
} from './types'
import { tools, executeTool } from '@/tools'
import { tools, executeTool, getTool } from '@/tools'
import { executeProviderRequest } from '@/providers/service'
import { getAllBlocks } from '@/blocks'
import { BlockConfig } from '@/blocks/types'

export class Executor {
  constructor(
@@ -209,7 +212,93 @@ export class Executor {
    block: SerializedBlock,
    inputs: Record<string, any>,
    context: ExecutionContext
  ): Promise<BlockOutput> {
  ): Promise<{ response: Record<string, any> }> {
    // Special handling for agent blocks that use providers
    if (block.metadata?.type === 'agent') {
      const model = inputs.model || 'gpt-4o'
      const providerId = model.startsWith('gpt') || model.startsWith('o1')
        ? 'openai'
        : model.startsWith('claude')
          ? 'anthropic'
          : model.startsWith('gemini')
            ? 'google'
            : model.startsWith('grok')
              ? 'xai'
              : 'deepseek'

      // Format tools if they exist
      const tools = Array.isArray(inputs.tools) ? inputs.tools.map((tool: any) => {
        // Get the tool ID from the block type
        const block = getAllBlocks().find((b: BlockConfig) => b.type === tool.type)
        const toolId = block?.tools.access[0]
        if (!toolId) return null

        // Get the tool configuration
        const toolConfig = getTool(toolId)
        if (!toolConfig) return null

        // Return the tool configuration with parameters
        return {
          id: toolConfig.id,
          name: toolConfig.name,
          description: toolConfig.description,
          // Store the actual parameters from the tool input
          params: tool.params || {},
          parameters: {
            type: 'object',
            properties: Object.entries(toolConfig.params).reduce((acc, [key, config]) => ({
              ...acc,
              [key]: {
                type: config.type === 'json' ? 'object' : config.type,
                description: config.description || '',
                ...(key in (tool.params || {}) && { default: tool.params[key] })
              }
            }), {}),
            required: Object.entries(toolConfig.params)
              .filter(([_, config]) => config.required)
              .map(([key]) => key)
          }
        }
      }).filter((t): t is NonNullable<typeof t> => t !== null) : []

      const requestPayload = {
        model,
        systemPrompt: inputs.systemPrompt,
        context: inputs.context,
        tools: tools.length > 0 ? tools : undefined,
        temperature: inputs.temperature,
        maxTokens: inputs.maxTokens,
        apiKey: inputs.apiKey
      }

      // Log the request payload for debugging
      console.log('Provider Request:', {
        providerId,
        model,
        tools: requestPayload.tools,
      })

      const response = await executeProviderRequest(providerId, requestPayload)

      // Return the actual response values
      return {
        response: {
          content: response.content,
          model: response.model,
          tokens: response.tokens || {
            prompt: 0,
            completion: 0,
            total: 0
          },
          toolCalls: {
            list: response.toolCalls || [],
            count: response.toolCalls?.length || 0
          }
        }
      }
    }

    // Regular tool execution for non-agent blocks
    const toolId = block.config.tool
    if (!toolId) {
      throw new Error(`Block "${block.id}" does not specify a tool`)
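Note (illustrative, not part of the diff): the prefix routing above, restated as a standalone helper for clarity.

function inferProviderId(model: string): string {
  if (model.startsWith('gpt') || model.startsWith('o1')) return 'openai'
  if (model.startsWith('claude')) return 'anthropic'
  if (model.startsWith('gemini')) return 'google'
  if (model.startsWith('grok')) return 'xai'
  return 'deepseek'
}

inferProviderId('grok-2-latest')  // 'xai'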
providers/anthropic/index.ts (new file, 115 lines)
@@ -0,0 +1,115 @@
import { ProviderConfig, FunctionCallResponse, ProviderToolConfig, ProviderRequest, Message } from '../types'
import { ToolConfig } from '@/tools/types'

export const anthropicProvider: ProviderConfig = {
  id: 'anthropic',
  name: 'Anthropic',
  description: 'Anthropic\'s Claude models',
  version: '1.0.0',
  models: ['claude-3-5-sonnet-20241022'],
  defaultModel: 'claude-3-5-sonnet-20241022',

  baseUrl: 'https://api.anthropic.com/v1/messages',
  headers: (apiKey: string) => ({
    'Content-Type': 'application/json',
    'x-api-key': apiKey,
    'anthropic-version': '2023-06-01'
  }),

  createRequest: (request: ProviderRequest, functions?: any) => ({
    model: request.model || anthropicProvider.defaultModel,
    messages: [
      ...(request.context ? [{ role: 'user', content: request.context }] : [])
    ],
    system: request.systemPrompt,
    temperature: request.temperature,
    max_tokens: request.maxTokens,
    ...(functions && {
      tools: functions
    })
  }),

  extractResponse: (response: any) => {
    const data = response.output || response
    const textContent = data.content?.find((item: any) => item.type === 'text')

    return {
      content: textContent?.text || '',
      tokens: {
        prompt: data.usage?.input_tokens,
        completion: data.usage?.output_tokens,
        total: data.usage?.input_tokens + data.usage?.output_tokens
      }
    }
  },

  handleToolCall: (response: any) => {
    const data = response.output || response
    const hasToolUse = data.content?.some((item: any) => item.type === 'tool_use')

    if (!hasToolUse) {
      const textContent = data.content?.find((item: any) => item.type === 'text')
      return {
        hasFunctionCall: false,
        content: textContent?.text || ''
      }
    }

    return {
      hasFunctionCall: true
    }
  },

  createToolCallMessage: (functionCall: FunctionCallResponse, result: any): Message => ({
    role: 'assistant',
    content: [
      {
        type: 'tool_use',
        name: functionCall.name,
        input: functionCall.arguments
      }
    ]
  }),

  transformToolsToFunctions: (tools: ProviderToolConfig[]) => {
    return tools.map(tool => ({
      type: 'function',
      name: tool.id,
      description: tool.description,
      parameters: {
        type: 'object',
        properties: Object.entries(tool.params).reduce((acc, [key, config]) => {
          acc[key] = {
            type: config.type,
            description: config.description,
            ...(config.default && { default: config.default })
          }
          return acc
        }, {} as Record<string, any>),
        required: Object.entries(tool.params)
          .filter(([_, config]) => config.required)
          .map(([key]) => key)
      }
    }))
  },

  transformFunctionCallResponse: (response: any): FunctionCallResponse => {
    const content = response.output ? response.output.content : response.content

    if (!content || !Array.isArray(content)) {
      throw new Error('Invalid response format: content is missing or not an array')
    }

    const toolUse = content.find(item => item.type === 'tool_use')
    if (!toolUse) {
      throw new Error('No tool use found in response')
    }

    return {
      name: toolUse.name,
      arguments: typeof toolUse.input === 'string'
        ? JSON.parse(toolUse.input)
        : toolUse.input
    }
  }
}
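Note (illustrative, not part of the diff; the tool's params are hypothetical): transformToolsToFunctions maps a ProviderToolConfig's params into an Anthropic-style tool definition.

const anthropicTools = anthropicProvider.transformToolsToFunctions([{
  id: 'firecrawl_scrape',
  name: 'Firecrawl Website Scraper',
  description: 'Extract clean content from any webpage in markdown format',
  params: { url: { type: 'string', required: true, description: 'Page to scrape' } },  // assumed params
  parameters: { type: 'object', properties: {}, required: [] }
}])
// -> [{ type: 'function', name: 'firecrawl_scrape', description: '...',
//      parameters: { type: 'object', properties: { url: { type: 'string', description: 'Page to scrape' } }, required: ['url'] } }]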
providers/openai/index.ts (new file, 60 lines)
@@ -0,0 +1,60 @@
import { ProviderConfig, FunctionCallResponse, ProviderToolConfig } from '../types'
import { ToolConfig } from '@/tools/types'

export const openaiProvider: ProviderConfig = {
  id: 'openai',
  name: 'OpenAI',
  description: 'OpenAI\'s GPT models',
  version: '1.0.0',
  models: ['gpt-4o', 'o1', 'o1-mini'],
  defaultModel: 'gpt-4o',

  baseUrl: 'https://api.openai.com/v1/chat/completions',
  headers: (apiKey: string) => ({
    'Content-Type': 'application/json',
    'Authorization': `Bearer ${apiKey}`
  }),

  transformToolsToFunctions: (tools: ProviderToolConfig[]) => {
    if (!tools || tools.length === 0) {
      return undefined
    }

    return tools.map(tool => ({
      name: tool.id,
      description: tool.description || '',
      parameters: {
        ...tool.parameters,
        properties: Object.entries(tool.parameters.properties).reduce((acc, [key, value]) => ({
          ...acc,
          [key]: {
            ...value,
            ...(key in tool.params && { default: tool.params[key] })
          }
        }), {})
      }
    }))
  },

  transformFunctionCallResponse: (response: any, tools?: ProviderToolConfig[]): FunctionCallResponse => {
    const functionCall = response.choices?.[0]?.message?.function_call
    if (!functionCall) {
      throw new Error('No function call found in response')
    }

    const args = typeof functionCall.arguments === 'string'
      ? JSON.parse(functionCall.arguments)
      : functionCall.arguments

    const tool = tools?.find(t => t.id === functionCall.name)
    const toolParams = tool?.params || {}

    return {
      name: functionCall.name,
      arguments: {
        ...toolParams, // First spread the stored params to ensure they're used as defaults
        ...args // Then spread any overrides from the function call
      }
    }
  }
}
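Note (illustrative, not part of the diff): given a chat completion whose message carries a function_call, the transform merges any params stored on the tool (used as defaults) with the model-supplied arguments.

const completion = {
  choices: [{ message: { function_call: { name: 'firecrawl_scrape', arguments: '{"url":"https://example.com"}' } } }]
}

const call = openaiProvider.transformFunctionCallResponse(completion, agentTools)  // agentTools built by the executor
// -> { name: 'firecrawl_scrape', arguments: { ...storedParams, url: 'https://example.com' } }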
providers/service.ts (new file, 215 lines)
@@ -0,0 +1,215 @@
import { ProviderConfig, ProviderRequest, ProviderResponse, Message } from './types'
import { openaiProvider } from './openai'
import { anthropicProvider } from './anthropic'
import { ToolConfig } from '@/tools/types'
import { getTool, executeTool } from '@/tools'

// Register providers
const providers: Record<string, ProviderConfig> = {
  openai: openaiProvider,
  anthropic: anthropicProvider,
  // Add other providers here as they're implemented
}

export async function executeProviderRequest(
  providerId: string,
  request: ProviderRequest
): Promise<ProviderResponse> {
  const provider = providers[providerId]
  if (!provider) {
    throw new Error(`Provider not found: ${providerId}`)
  }

  // Only transform tools if they are provided and non-empty
  const functions = request.tools && request.tools.length > 0
    ? provider.transformToolsToFunctions(request.tools)
    : undefined

  // Base payload that's common across providers
  const basePayload = {
    model: request.model || provider.defaultModel,
    messages: [
      { role: 'system' as const, content: request.systemPrompt },
      ...(request.context ? [{ role: 'user' as const, content: request.context }] : [])
    ] as Message[],
    temperature: request.temperature,
    max_tokens: request.maxTokens
  }

  // Provider-specific payload adjustments
  let payload
  switch (providerId) {
    case 'openai':
      payload = {
        ...basePayload,
        ...(functions && {
          functions,
          function_call: 'auto'
        })
      }
      break
    case 'anthropic':
      payload = {
        ...basePayload,
        system: request.systemPrompt,
        messages: request.context ? [{ role: 'user', content: request.context }] : [],
        ...(functions && {
          tools: functions
        })
      }
      break
    default:
      payload = {
        ...basePayload,
        ...(functions && { functions })
      }
  }

  // Make the API request through the proxy
  const response = await fetch('/api/proxy', {
    method: 'POST',
    headers: {
      'Content-Type': 'application/json'
    },
    body: JSON.stringify({
      toolId: `${providerId}/chat`,
      params: {
        ...payload,
        apiKey: request.apiKey
      }
    })
  })

  if (!response.ok) {
    const error = await response.json()
    throw new Error(error.error || 'Provider API error')
  }

  const { output: data } = await response.json()

  // Extract content and tokens based on provider
  let content = ''
  let tokens = undefined

  switch (providerId) {
    case 'anthropic':
      content = data.content?.[0]?.text || ''
      tokens = {
        prompt: data.usage?.input_tokens,
        completion: data.usage?.output_tokens,
        total: data.usage?.input_tokens + data.usage?.output_tokens
      }
      break
    default:
      content = data.choices?.[0]?.message?.content || ''
      tokens = data.usage && {
        prompt: data.usage.prompt_tokens,
        completion: data.usage.completion_tokens,
        total: data.usage.total_tokens
      }
  }

  // Check for function calls
  let toolCalls = []
  let toolResults = []
  let currentMessages = [...basePayload.messages]

  try {
    let currentResponse = data
    let hasMoreCalls = true

    while (hasMoreCalls) {
      const hasFunctionCall =
        (providerId === 'openai' && currentResponse.choices?.[0]?.message?.function_call) ||
        (providerId === 'anthropic' && currentResponse.content?.some((item: any) => item.type === 'function_call'))

      if (!hasFunctionCall) {
        // No more function calls, use the content from the current response
        content = currentResponse.choices?.[0]?.message?.content || ''
        hasMoreCalls = false
        continue
      }

      const functionCall = provider.transformFunctionCallResponse(currentResponse, request.tools)
      if (!functionCall) {
        hasMoreCalls = false
        continue
      }

      // Execute the tool
      const tool = getTool(functionCall.name)
      if (!tool) {
        throw new Error(`Tool not found: ${functionCall.name}`)
      }

      const result = await executeTool(functionCall.name, functionCall.arguments)
      if (result.success) {
        toolResults.push(result.output)
        toolCalls.push(functionCall)

        // Add the assistant's function call and the function result to the message history
        currentMessages.push({
          role: 'assistant',
          content: null,
          function_call: {
            name: functionCall.name,
            arguments: JSON.stringify(functionCall.arguments)
          }
        })
        currentMessages.push({
          role: 'function',
          name: functionCall.name,
          content: JSON.stringify(result.output)
        })

        // Make the next call through the proxy
        const nextResponse = await fetch('/api/proxy', {
          method: 'POST',
          headers: {
            'Content-Type': 'application/json'
          },
          body: JSON.stringify({
            toolId: `${providerId}/chat`,
            params: {
              ...basePayload,
              messages: currentMessages,
              ...(functions && { functions, function_call: 'auto' }),
              apiKey: request.apiKey
            }
          })
        })

        if (!nextResponse.ok) {
          const error = await nextResponse.json()
          throw new Error(error.error || 'Provider API error')
        }

        const { output: nextData } = await nextResponse.json()
        currentResponse = nextData

        // Update tokens
        if (nextData.usage) {
          tokens = {
            prompt: (tokens?.prompt || 0) + nextData.usage.prompt_tokens,
            completion: (tokens?.completion || 0) + nextData.usage.completion_tokens,
            total: (tokens?.total || 0) + nextData.usage.total_tokens
          }
        }
      } else {
        hasMoreCalls = false
      }
    }
  } catch (error: any) {
    console.error('Error executing tool:', error)
    throw error
  }

  return {
    content,
    model: data.model,
    tokens,
    toolCalls: toolCalls.length > 0 ? toolCalls : undefined,
    toolResults: toolResults.length > 0 ? toolResults : undefined
  }
}
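Note (illustrative, not part of the diff): the executor calls this service roughly like this.

const response = await executeProviderRequest('openai', {
  model: 'gpt-4o',
  systemPrompt: 'You are a helpful agent.',
  context: 'Scrape https://example.com and summarize it.',
  tools: agentTools,          // ProviderToolConfig[] assembled by the executor
  apiKey: inputs.apiKey
})
// response.content, response.tokens, response.toolCalls, response.toolResults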
providers/types.ts (new file, 74 lines)
@@ -0,0 +1,74 @@
import { ToolConfig } from '@/tools/types'

export interface ProviderConfig {
  id: string
  name: string
  description: string
  version: string
  models: string[]
  defaultModel: string

  // Provider-specific configuration
  baseUrl: string
  headers: (apiKey: string) => Record<string, string>

  // Tool calling support
  transformToolsToFunctions: (tools: ProviderToolConfig[]) => any
  transformFunctionCallResponse: (response: any, tools?: ProviderToolConfig[]) => FunctionCallResponse

  // Internal state for tool name mapping
  _toolNameMapping?: Map<string, string>
}

export interface FunctionCallResponse {
  name: string
  arguments: Record<string, any>
}

export interface ProviderResponse {
  content: string
  model: string
  tokens?: {
    prompt?: number
    completion?: number
    total?: number
  }
  toolCalls?: FunctionCallResponse[]
  toolResults?: any[]
}

export interface ProviderToolConfig {
  id: string
  name: string
  description: string
  params: Record<string, any>
  parameters: {
    type: string
    properties: Record<string, any>
    required: string[]
  }
}

export interface Message {
  role: 'system' | 'user' | 'assistant' | 'function'
  content: string | null
  name?: string
  function_call?: {
    name: string
    arguments: string
  }
}

export interface ProviderRequest {
  model: string
  systemPrompt: string
  context?: string
  tools?: ProviderToolConfig[]
  temperature?: number
  maxTokens?: number
  apiKey: string
  messages?: Message[]
}

// Map of provider IDs to their configurations
export const providers: Record<string, ProviderConfig> = {}
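Note (hypothetical sketch, not in the diff): wiring up one of the still-unimplemented providers means satisfying ProviderConfig and registering it in providers/service.ts; the endpoint and transforms below are assumptions, not the project's actual implementation.

export const deepseekProvider: ProviderConfig = {
  id: 'deepseek',
  name: 'DeepSeek',
  description: 'DeepSeek chat and reasoning models',
  version: '1.0.0',
  models: ['deepseek-v3', 'deepseek-r1'],
  defaultModel: 'deepseek-v3',
  baseUrl: 'https://api.deepseek.com/chat/completions',  // assumed endpoint
  headers: (apiKey: string) => ({ 'Content-Type': 'application/json', Authorization: `Bearer ${apiKey}` }),
  transformToolsToFunctions: (tools) =>
    tools.map((t) => ({ name: t.id, description: t.description, parameters: t.parameters })),
  transformFunctionCallResponse: (response) => {
    const call = response.choices?.[0]?.message?.function_call
    if (!call) throw new Error('No function call found in response')
    return {
      name: call.name,
      arguments: typeof call.arguments === 'string' ? JSON.parse(call.arguments) : call.arguments
    }
  }
}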
@@ -20,7 +20,7 @@ export interface ChatResponse extends ToolResponse {
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'anthropic.chat',
  id: 'anthropic_chat',
  name: 'Anthropic Chat',
  description: 'Chat with Anthropic Claude models',
  version: '1.0.0',

@@ -16,7 +16,7 @@ export interface VisionResponse extends ToolResponse {
}

export const visionTool: ToolConfig<VisionParams, VisionResponse> = {
  id: 'crewai.vision',
  id: 'crewai_vision',
  name: 'Vision Analysis',
  description: 'Analyze images using vision models',
  version: '1.0.0',

@@ -23,7 +23,7 @@ export interface ChatResponse extends ToolResponse {
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'deepseek.chat',
  id: 'deepseek_chat',
  name: 'DeepSeek Chat',
  description: 'Chat with DeepSeek-v3 model',
  version: '1.0.0',

@@ -22,7 +22,7 @@ export interface ChatResponse extends ToolResponse {
}

export const reasonerTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'deepseek.reasoner',
  id: 'deepseek_reasoner',
  name: 'DeepSeek Reasoner',
  description: 'Chat with DeepSeek-R1 reasoning model',
  version: '1.0.0',

@@ -32,7 +32,7 @@ export interface ScrapeResponse extends ToolResponse {
}

export const scrapeTool: ToolConfig<ScrapeParams, ScrapeResponse> = {
  id: 'firecrawl.scrape',
  id: 'firecrawl_scrape',
  name: 'Firecrawl Website Scraper',
  description: 'Extract clean content from any webpage in markdown format',
  version: '1.0.0',

@@ -13,7 +13,7 @@ export interface CodeExecutionOutput extends ToolResponse {
}

export const functionExecuteTool: ToolConfig<CodeExecutionInput, CodeExecutionOutput> = {
  id: 'function.execute',
  id: 'function_execute',
  name: 'Function Execute',
  description: 'Execute code in a sandboxed environment',
  version: '1.0.0',

@@ -18,7 +18,7 @@ export interface RepoInfoResponse extends ToolResponse {
}

export const repoInfoTool: ToolConfig<RepoInfoParams, RepoInfoResponse> = {
  id: 'github.repoinfo',
  id: 'github_repoinfo',
  name: 'GitHub Repository Info',
  description: 'Fetch detailed information about a GitHub repository',
  version: '1.0.0',

@@ -21,7 +21,7 @@ export interface ChatResponse extends ToolResponse {
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'google.chat',
  id: 'google_chat',
  name: 'Google Chat',
  description: 'Chat with Google Gemini models',
  version: '1.0.0',

@@ -21,7 +21,7 @@ export interface RequestResponse extends ToolResponse {
}

export const requestTool: ToolConfig<RequestParams, RequestResponse> = {
  id: 'http.request',
  id: 'http_request',
  name: 'HTTP Request',
  description: 'Make HTTP requests to any endpoint with support for CRUD operations',
  version: '1.0.0',

@@ -27,7 +27,7 @@ export interface ContactsResponse extends ToolResponse {
}

export const contactsTool: ToolConfig<ContactsParams, ContactsResponse> = {
  id: 'hubspot.contacts',
  id: 'hubspot_contacts',
  name: 'HubSpot Contacts',
  description: 'Manage HubSpot contacts - create, search, and update contact records',
  version: '1.0.0',

@@ -18,29 +18,29 @@ import { repoInfoTool } from './github/repo'
// Registry of all available tools
export const tools: Record<string, ToolConfig> = {
  // AI Models
  'openai.chat': openAIChat,
  'anthropic.chat': anthropicChat,
  'google.chat': googleChat,
  'xai.chat': xaiChat,
  'deepseek.chat': deepseekChat,
  'deepseek.reasoner': deepseekReasoner,
  'openai_chat': openAIChat,
  'anthropic_chat': anthropicChat,
  'google_chat': googleChat,
  'xai_chat': xaiChat,
  'deepseek_chat': deepseekChat,
  'deepseek_reasoner': deepseekReasoner,
  // HTTP
  'http.request': httpRequest,
  'http_request': httpRequest,
  // CRM Tools
  'hubspot.contacts': hubspotContacts,
  'salesforce.opportunities': salesforceOpportunities,
  'hubspot_contacts': hubspotContacts,
  'salesforce_opportunities': salesforceOpportunities,
  // Function Tools
  'function.execute': functionExecute,
  'function_execute': functionExecute,
  // CrewAI Tools
  'crewai.vision': crewAIVision,
  'crewai_vision': crewAIVision,
  // Firecrawl Tools
  'firecrawl.scrape': scrapeTool,
  'firecrawl_scrape': scrapeTool,
  // Jina Tools
  'jina.readurl': readUrlTool,
  'jina_readurl': readUrlTool,
  // Slack Tools
  'slack.message': slackMessageTool,
  'slack_message': slackMessageTool,
  // GitHub Tools
  'github.repoinfo': repoInfoTool
  'github_repoinfo': repoInfoTool
}

// Get a tool by its ID
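Note (illustrative, not part of the diff; the params are hypothetical): with underscore IDs, the registry key, getTool, and executeTool all agree.

const tool = getTool('firecrawl_scrape')   // ToolConfig from the registry above
const result = await executeTool('firecrawl_scrape', { url: 'https://example.com', apiKey: '...' })
if (result.success) console.log(result.output)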
@@ -16,7 +16,7 @@ export interface ReadUrlResponse extends ToolResponse {
}

export const readUrlTool: ToolConfig<ReadUrlParams, ReadUrlResponse> = {
  id: 'jina.readurl',
  id: 'jina_readurl',
  name: 'Jina Reader',
  description: 'Convert any URL to LLM-friendly text using Jina AI Reader',
  version: '1.0.0',

@@ -24,7 +24,7 @@ export interface ChatResponse extends ToolResponse {
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'openai.chat',
  id: 'openai_chat',
  name: 'OpenAI Chat',
  description: 'Chat with OpenAI models',
  version: '1.0.0',

@@ -28,7 +28,7 @@ export interface OpportunityResponse extends ToolResponse {
}

export const opportunitiesTool: ToolConfig<OpportunityParams, OpportunityResponse> = {
  id: 'salesforce.opportunities',
  id: 'salesforce_opportunities',
  name: 'Salesforce Opportunities',
  description: 'Manage Salesforce opportunities - create, query, and update opportunity records',
  version: '1.0.0',

@@ -14,7 +14,7 @@ export interface SlackMessageResponse extends ToolResponse {
}

export const slackMessageTool: ToolConfig<SlackMessageParams, SlackMessageResponse> = {
  id: 'slack.message',
  id: 'slack_message',
  name: 'Slack Message',
  description: 'Send a message to a Slack channel',
  version: '1.0.0',

@@ -22,7 +22,7 @@ export interface ChatResponse extends ToolResponse {
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'xai.chat',
  id: 'xai_chat',
  name: 'xAI Chat',
  description: 'Chat with xAI models',
  version: '1.0.0',