feat(providers): removed providers from tools directory, added cerebras sdk
Mirror of https://github.com/simstudioai/sim.git
@@ -1,4 +1,5 @@
 import { NextResponse } from 'next/server'
+import { MODEL_PROVIDERS } from '@/providers/consts'
 import { getProvider } from '@/providers/registry'
 import { getTool } from '@/tools'

@@ -31,6 +32,37 @@ export async function POST(request: Request) {
       })
     }

+    // Check if this is an LLM provider tool (e.g., openai_chat, anthropic_chat)
+    const providerPrefix = toolId.split('_')[0]
+    if (Object.values(MODEL_PROVIDERS).includes(providerPrefix)) {
+      // Redirect to the provider system
+      const providerInstance = getProvider(providerPrefix)
+      if (!providerInstance) {
+        throw new Error(`Provider not found for tool: ${toolId}`)
+      }
+
+      const { apiKey, ...restParams } = params
+      if (!apiKey) {
+        throw new Error('API key is required')
+      }
+
+      const response = await fetch(providerInstance.baseUrl, {
+        method: 'POST',
+        headers: providerInstance.headers(apiKey),
+        body: JSON.stringify(restParams),
+      })
+
+      if (!response.ok) {
+        const error = await response.json()
+        throw new Error(error.error?.message || `${toolId} API error`)
+      }
+
+      return NextResponse.json({
+        success: true,
+        output: await response.json(),
+      })
+    }
+
     // Handle regular tool requests
     const tool = getTool(toolId)
     if (!tool) {
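For illustration, a call that takes the new provider branch might look like the sketch below. The route path and the shape of the JSON body (where toolId and params come from) are assumptions not shown in this diff; the prefix dispatch itself is exactly what the hunk above adds:

  // Hypothetical client call; '/api/tools' is an assumed path
  const res = await fetch('/api/tools', {
    method: 'POST',
    headers: { 'Content-Type': 'application/json' },
    body: JSON.stringify({
      toolId: 'openai_chat', // prefix 'openai' is a MODEL_PROVIDERS value -> provider branch
      params: { apiKey: 'sk-...', model: 'gpt-4o' },
    }),
  })
  // A toolId whose prefix is not a provider id (e.g. 'http_request')
  // still falls through to getTool(toolId) as before.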
@@ -1,6 +1,6 @@
 import { AgentIcon } from '@/components/icons'
+import { MODEL_PROVIDERS } from '@/providers/consts'
 import { ToolResponse } from '@/tools/types'
-import { MODEL_TOOLS, ModelType } from '../consts'
 import { BlockConfig } from '../types'

 interface AgentResponse extends ToolResponse {
@@ -49,7 +49,7 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
       title: 'Model',
       type: 'dropdown',
       layout: 'half',
-      options: Object.keys(MODEL_TOOLS),
+      options: Object.keys(MODEL_PROVIDERS),
     },
     {
       id: 'temperature',
@@ -96,7 +96,7 @@ export const AgentBlock: BlockConfig<AgentResponse> = {
       if (!model) {
         throw new Error('No model selected')
       }
-      const tool = MODEL_TOOLS[model as ModelType]
+      const tool = MODEL_PROVIDERS[model]
       if (!tool) {
         throw new Error(`Invalid model selected: ${model}`)
       }
@@ -1,6 +1,7 @@
 import { ChartBarIcon } from '@/components/icons'
+import { MODEL_PROVIDERS } from '@/providers/consts'
+import { ProviderId } from '@/providers/registry'
 import { ToolResponse } from '@/tools/types'
-import { MODEL_TOOLS, ModelType } from '../consts'
 import { BlockConfig, ParamType } from '../types'

 interface Metric {
@@ -81,7 +82,7 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
       title: 'Model',
       type: 'dropdown',
       layout: 'half',
-      options: Object.keys(MODEL_TOOLS),
+      options: Object.keys(MODEL_PROVIDERS),
     },
     {
       id: 'apiKey',
@@ -125,7 +126,7 @@ export const EvaluatorBlock: BlockConfig<EvaluatorResponse> = {
       if (!model) {
         throw new Error('No model selected')
       }
-      const tool = MODEL_TOOLS[model as ModelType]
+      const tool = MODEL_PROVIDERS[model as ProviderId]
       if (!tool) {
         throw new Error(`Invalid model selected: ${model}`)
       }
@@ -1,6 +1,7 @@
 import { ConnectIcon } from '@/components/icons'
+import { MODEL_PROVIDERS } from '@/providers/consts'
+import { ProviderId } from '@/providers/registry'
 import { ToolResponse } from '@/tools/types'
-import { MODEL_TOOLS, ModelType } from '../consts'
 import { BlockConfig } from '../types'

 interface RouterResponse extends ToolResponse {
@@ -105,7 +106,7 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
       title: 'Model',
       type: 'dropdown',
       layout: 'half',
-      options: Object.keys(MODEL_TOOLS),
+      options: Object.keys(MODEL_PROVIDERS),
     },
     {
       id: 'apiKey',
@@ -142,7 +143,7 @@ export const RouterBlock: BlockConfig<RouterResponse> = {
       if (!model) {
         throw new Error('No model selected')
       }
-      const tool = MODEL_TOOLS[model as ModelType]
+      const tool = MODEL_PROVIDERS[model as ProviderId]
       if (!tool) {
         throw new Error(`Invalid model selected: ${model}`)
       }
@@ -1,6 +1,6 @@
 import { TranslateIcon } from '@/components/icons'
-import { ChatResponse } from '@/tools/openai/chat'
-import { MODEL_TOOLS, ModelType } from '../consts'
+import { MODEL_PROVIDERS } from '@/providers/consts'
+import { ProviderId } from '@/providers/registry'
 import { BlockConfig } from '../types'

 const getTranslationPrompt = (
@@ -14,7 +14,7 @@ const getTranslationPrompt = (

 Only return the translated text without any explanations or notes. The translation should be natural and fluent in ${targetLanguage || 'English'}.`

-export const TranslateBlock: BlockConfig<ChatResponse> = {
+export const TranslateBlock: BlockConfig = {
   type: 'translate',
   name: 'Translate',
   description: 'Translate text to any language',
@@ -41,7 +41,7 @@ export const TranslateBlock: BlockConfig<ChatResponse> = {
       title: 'Model',
       type: 'dropdown',
       layout: 'half',
-      options: Object.keys(MODEL_TOOLS),
+      options: Object.keys(MODEL_PROVIDERS),
     },
     {
       id: 'apiKey',
@@ -73,7 +73,7 @@ export const TranslateBlock: BlockConfig<ChatResponse> = {
         throw new Error('No model selected')
       }

-      const tool = MODEL_TOOLS[model as ModelType]
+      const tool = MODEL_PROVIDERS[model as ProviderId]

       if (!tool) {
         throw new Error(`Invalid model selected: ${model}`)
@@ -1,13 +1,14 @@
-export const MODEL_TOOLS = {
-  'gpt-4o': 'openai_chat',
-  o1: 'openai_chat',
-  'o3-mini': 'openai_chat',
-  'deepseek-v3': 'deepseek_chat',
-  'deepseek-r1': 'deepseek_reasoner',
-  'claude-3-7-sonnet-20250219': 'anthropic_chat',
-  'gemini-2.0-flash': 'google_chat',
-  'grok-2-latest': 'xai_chat',
-} as const
+// export const MODEL_TOOLS = {
+//   'gpt-4o': 'openai_chat',
+//   o1: 'openai_chat',
+//   'o3-mini': 'openai_chat',
+//   'deepseek-v3': 'deepseek_chat',
+//   'deepseek-r1': 'deepseek_reasoner',
+//   'claude-3-7-sonnet-20250219': 'anthropic_chat',
+//   'gemini-2.0-flash': 'google_chat',
+//   'grok-2-latest': 'xai_chat',
+//   'llama-3.3-70b': 'cerebras_chat',
+// } as const

-export type ModelType = keyof typeof MODEL_TOOLS
-export type ToolType = (typeof MODEL_TOOLS)[ModelType]
+// export type ModelType = keyof typeof MODEL_TOOLS
+// export type ToolType = (typeof MODEL_TOOLS)[ModelType]
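Side by side, the retired map and its replacement (providers/consts.ts, added later in this diff) differ only in what a model name resolves to:

  // Old: model name -> tool id, executed through the tools registry
  //   MODEL_TOOLS['gpt-4o'] === 'openai_chat'
  // New: model name -> provider id, executed through the provider registry
  //   MODEL_PROVIDERS['gpt-4o'] === 'openai'
  //   MODEL_PROVIDERS['llama-3.3-70b'] === 'cerebras' // new model, new provider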
@@ -103,6 +103,9 @@ export class AgentBlockHandler implements BlockHandler {
           .filter((t): t is NonNullable<typeof t> => t !== null)
         : []

+    // Add local_execution: true for Cerebras provider
+    const additionalParams = providerId === 'cerebras' ? { local_execution: true } : {}
+
     const response = await executeProviderRequest(providerId, {
       model,
       systemPrompt: inputs.systemPrompt,
@@ -114,6 +117,7 @@ export class AgentBlockHandler implements BlockHandler {
       maxTokens: inputs.maxTokens,
       apiKey: inputs.apiKey,
       responseFormat,
+      ...additionalParams,
     })

     // Return structured or standard response based on responseFormat
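The spread is a no-op for every provider except Cerebras. A sketch of the two resulting request objects (other fields elided):

  // providerId !== 'cerebras'
  //   { model, systemPrompt, ..., responseFormat }                        // additionalParams is {}
  // providerId === 'cerebras'
  //   { model, systemPrompt, ..., responseFormat, local_execution: true }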
31 package-lock.json generated
@@ -8,6 +8,7 @@
       "name": "sim",
       "version": "0.1.0",
       "dependencies": {
+        "@cerebras/cerebras_cloud_sdk": "^1.23.0",
         "@radix-ui/react-alert-dialog": "^1.1.5",
         "@radix-ui/react-checkbox": "^1.1.3",
         "@radix-ui/react-dialog": "^1.1.5",
@@ -646,6 +647,36 @@
       "resolved": "https://registry.npmjs.org/@better-fetch/fetch/-/fetch-1.1.12.tgz",
       "integrity": "sha512-B3bfloI/2UBQWIATRN6qmlORrvx3Mp0kkNjmXLv0b+DtbtR+pP4/I5kQA/rDUv+OReLywCCldf6co4LdDmh8JA=="
     },
+    "node_modules/@cerebras/cerebras_cloud_sdk": {
+      "version": "1.23.0",
+      "resolved": "https://registry.npmjs.org/@cerebras/cerebras_cloud_sdk/-/cerebras_cloud_sdk-1.23.0.tgz",
+      "integrity": "sha512-1krbmU4nTbJICUbcJGQGGo+MtB0nzHx/jwW24ZhoBzuC5QT8H/WzNjLdKtvdf3TB8GS1AtdWUkUHNJf1EZfvJA==",
+      "license": "Apache-2.0",
+      "dependencies": {
+        "@types/node": "^18.11.18",
+        "@types/node-fetch": "^2.6.4",
+        "abort-controller": "^3.0.0",
+        "agentkeepalive": "^4.2.1",
+        "form-data-encoder": "1.7.2",
+        "formdata-node": "^4.3.2",
+        "node-fetch": "^2.6.7"
+      }
+    },
+    "node_modules/@cerebras/cerebras_cloud_sdk/node_modules/@types/node": {
+      "version": "18.19.76",
+      "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.76.tgz",
+      "integrity": "sha512-yvR7Q9LdPz2vGpmpJX5LolrgRdWvB67MJKDPSgIIzpFbaf9a1j/f5DnLp5VDyHGMR0QZHlTr1afsD87QCXFHKw==",
+      "license": "MIT",
+      "dependencies": {
+        "undici-types": "~5.26.4"
+      }
+    },
+    "node_modules/@cerebras/cerebras_cloud_sdk/node_modules/undici-types": {
+      "version": "5.26.5",
+      "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz",
+      "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==",
+      "license": "MIT"
+    },
     "node_modules/@drizzle-team/brocli": {
       "version": "0.10.2",
       "resolved": "https://registry.npmjs.org/@drizzle-team/brocli/-/brocli-0.10.2.tgz",
package.json
@@ -17,6 +17,7 @@
     "test:coverage": "jest --coverage"
   },
   "dependencies": {
+    "@cerebras/cerebras_cloud_sdk": "^1.23.0",
    "@radix-ui/react-alert-dialog": "^1.1.5",
    "@radix-ui/react-checkbox": "^1.1.3",
    "@radix-ui/react-dialog": "^1.1.5",
115 providers/cerebras/index.ts Normal file
@@ -0,0 +1,115 @@
import { ToolConfig } from '@/tools/types'
import { FunctionCallResponse, ProviderConfig, ProviderRequest, ProviderToolConfig } from '../types'

export const cerebrasProvider: ProviderConfig = {
  id: 'cerebras',
  name: 'Cerebras',
  description: "Cerebras' Llama models",
  version: '1.0.0',
  models: ['llama-3.3-70b'],
  defaultModel: 'llama-3.3-70b',

  // Since we're using the SDK directly, we'll set these to empty values
  // They won't be used since we'll handle the execution locally
  baseUrl: '',
  headers: (apiKey: string) => ({}),

  transformToolsToFunctions: (tools: ProviderToolConfig[]) => {
    if (!tools || tools.length === 0) {
      return undefined
    }

    return tools.map((tool) => ({
      name: tool.id,
      description: tool.description,
      parameters: tool.parameters,
    }))
  },

  transformFunctionCallResponse: (
    response: any,
    tools?: ProviderToolConfig[]
  ): FunctionCallResponse => {
    const functionCall = response.choices?.[0]?.message?.function_call
    if (!functionCall) {
      throw new Error('No function call found in response')
    }

    const tool = tools?.find((t) => t.id === functionCall.name)
    const toolParams = tool?.params || {}

    return {
      name: functionCall.name,
      arguments: {
        ...toolParams,
        ...JSON.parse(functionCall.arguments),
      },
    }
  },

  transformRequest: (request: ProviderRequest, functions?: any) => {
    // Start with an empty array for all messages
    const allMessages = []

    // Add system prompt if present
    if (request.systemPrompt) {
      allMessages.push({
        role: 'system',
        content: request.systemPrompt,
      })
    }

    // Add context if present
    if (request.context) {
      allMessages.push({
        role: 'user',
        content: request.context,
      })
    }

    // Add remaining messages
    if (request.messages) {
      allMessages.push(...request.messages)
    }

    // Build the request payload
    const payload: any = {
      model: request.model || 'llama-3.3-70b',
      messages: allMessages,
      local_execution: true, // Enable local execution with the SDK
    }

    // Add standard parameters
    if (request.temperature !== undefined) payload.temperature = request.temperature
    if (request.maxTokens !== undefined) payload.max_tokens = request.maxTokens

    // Add function calling support
    if (functions) {
      payload.functions = functions
      payload.function_call = 'auto'
    }

    return payload
  },

  transformResponse: (response: any) => {
    const output = {
      content: response.choices?.[0]?.message?.content || '',
      tokens: undefined as any,
    }

    if (response.usage) {
      output.tokens = {
        prompt: response.usage.prompt_tokens,
        completion: response.usage.completion_tokens,
        total: response.usage.total_tokens,
      }
    }

    return output
  },

  hasFunctionCall: (response: any) => {
    return !!response.choices?.[0]?.message?.function_call
  },
}
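Fed a typical agent request, transformRequest yields an OpenAI-style chat payload with the SDK flag already set. A sketch with illustrative values (the cast is only because the sketch omits the remaining ProviderRequest fields):

  import { cerebrasProvider } from '@/providers/cerebras'

  const payload = cerebrasProvider.transformRequest({
    model: 'llama-3.3-70b',
    systemPrompt: 'You are concise.',
    context: 'Say hello.',
    temperature: 0.2,
  } as any)
  // payload is:
  // {
  //   model: 'llama-3.3-70b',
  //   messages: [
  //     { role: 'system', content: 'You are concise.' },
  //     { role: 'user', content: 'Say hello.' },
  //   ],
  //   local_execution: true,
  //   temperature: 0.2,
  // }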
56 providers/cerebras/service.ts Normal file
@@ -0,0 +1,56 @@
import { Cerebras } from '@cerebras/cerebras_cloud_sdk'
import { ProviderRequest, ProviderResponse } from '../types'
import { cerebrasProvider } from './index'

// This function will be used to execute Cerebras requests locally using their SDK
export async function executeCerebrasRequest(request: ProviderRequest): Promise<ProviderResponse> {
  try {
    const client = new Cerebras({
      apiKey: request.apiKey,
    })

    // Transform the request using the provider's transformRequest method
    const payload = cerebrasProvider.transformRequest(request)

    // Prepare the messages for the SDK
    const messages = payload.messages

    // Prepare the options for the SDK
    const options = {
      temperature: payload.temperature,
      max_tokens: payload.max_tokens,
      functions: payload.functions,
      function_call: payload.function_call,
    }

    // Execute the request using the SDK
    const response = await client.chat.completions.create({
      model: payload.model,
      messages,
      ...options,
    })

    // Transform the response using the provider's transformResponse method
    const transformedResponse = cerebrasProvider.transformResponse(response)

    // Check for function calls
    const hasFunctionCall = cerebrasProvider.hasFunctionCall(response)
    let toolCalls = undefined

    if (hasFunctionCall) {
      const functionCall = cerebrasProvider.transformFunctionCallResponse(response, request.tools)
      toolCalls = [functionCall]
    }

    // Return the response in the expected format
    return {
      content: transformedResponse.content,
      model: request.model,
      tokens: transformedResponse.tokens,
      toolCalls,
    }
  } catch (error: any) {
    console.error('Error executing Cerebras request:', error)
    throw new Error(`Cerebras API error: ${error.message}`)
  }
}
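A minimal sketch of invoking the service directly; the field names follow ProviderRequest as used elsewhere in this commit, the environment variable name is an assumption, and the cast again stands in for the fields the sketch omits:

  import { executeCerebrasRequest } from '@/providers/cerebras/service'

  const response = await executeCerebrasRequest({
    model: 'llama-3.3-70b',
    systemPrompt: 'You are a helpful assistant.',
    context: 'Summarize this commit in one sentence.',
    temperature: 0.3,
    maxTokens: 256,
    apiKey: process.env.CEREBRAS_API_KEY!, // assumed env var name
  } as any)
  console.log(response.content, response.tokens)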
17 providers/consts.ts Normal file
@@ -0,0 +1,17 @@
import { ProviderId } from './registry'

/**
 * Direct mapping from model names to provider IDs
 * This replaces the need for the MODEL_TOOLS mapping in blocks/consts.ts
 */
export const MODEL_PROVIDERS: Record<string, ProviderId> = {
  'gpt-4o': 'openai',
  o1: 'openai',
  'o3-mini': 'openai',
  'claude-3-7-sonnet-20250219': 'anthropic',
  'gemini-2.0-flash': 'google',
  'grok-2-latest': 'xai',
  'deepseek-v3': 'deepseek',
  'deepseek-r1': 'deepseek',
  'llama-3.3-70b': 'cerebras',
}
@@ -1,11 +1,12 @@
 import { anthropicProvider } from './anthropic'
+import { cerebrasProvider } from './cerebras'
 import { deepseekProvider } from './deepseek'
 import { googleProvider } from './google'
 import { openaiProvider } from './openai'
 import { ProviderConfig } from './types'
 import { xAIProvider } from './xai'

-export type ProviderId = 'openai' | 'anthropic' | 'google' | 'deepseek' | 'xai'
+export type ProviderId = 'openai' | 'anthropic' | 'google' | 'deepseek' | 'xai' | 'cerebras'

 export const providers: Record<ProviderId, ProviderConfig> = {
   openai: openaiProvider,
@@ -13,6 +14,7 @@ export const providers: Record<ProviderId, ProviderConfig> = {
   google: googleProvider,
   deepseek: deepseekProvider,
   xai: xAIProvider,
+  cerebras: cerebrasProvider,
 }

 export function getProvider(id: string): ProviderConfig | undefined {
@@ -1,4 +1,5 @@
 import { executeTool, getTool } from '@/tools'
+import { executeCerebrasRequest } from './cerebras/service'
 import { getProvider } from './registry'
 import { ProviderRequest, ProviderResponse, TokenInfo } from './types'
 import { extractAndParseJSON } from './utils'
@@ -64,6 +65,11 @@ export async function executeProviderRequest(
     throw new Error(`Provider not found: ${providerId}`)
   }

+  // Special handling for Cerebras provider which uses SDK directly
+  if (providerId === 'cerebras') {
+    return executeCerebrasRequest(request)
+  }
+
   // If responseFormat is provided, modify the system prompt to enforce structured output
   if (request.responseFormat) {
     const structuredOutputInstructions = generateStructuredOutputInstructions(
@@ -1,9 +1,23 @@
-import { MODEL_TOOLS, ModelType } from '@/blocks/consts'
 import { ProviderId } from './registry'

+/**
+ * Direct mapping from model names to provider IDs
+ */
+export const MODEL_PROVIDERS: Record<string, ProviderId> = {
+  'gpt-4o': 'openai',
+  o1: 'openai',
+  'o3-mini': 'openai',
+  'claude-3-7-sonnet-20250219': 'anthropic',
+  'gemini-2.0-flash': 'google',
+  'grok-2-latest': 'xai',
+  'deepseek-v3': 'deepseek',
+  'deepseek-r1': 'deepseek',
+  'llama-3.3-70b': 'cerebras',
+}
+
 /**
  * Determines the provider ID based on the model name.
- * Uses the existing MODEL_TOOLS mapping and falls back to pattern matching if needed.
+ * Uses the MODEL_PROVIDERS mapping and falls back to pattern matching if needed.
  *
  * @param model - The model name/identifier
  * @returns The corresponding provider ID
@@ -11,11 +25,9 @@ import { ProviderId } from './registry'
 export function getProviderFromModel(model: string): ProviderId {
   const normalizedModel = model.toLowerCase()

-  // First try to match exactly from our MODEL_TOOLS mapping
-  if (normalizedModel in MODEL_TOOLS) {
-    const toolId = MODEL_TOOLS[normalizedModel as ModelType]
-    // Extract provider ID from tool ID (e.g., 'openai_chat' -> 'openai')
-    return toolId.split('_')[0] as ProviderId
+  // First try to match exactly from our MODEL_PROVIDERS mapping
+  if (normalizedModel in MODEL_PROVIDERS) {
+    return MODEL_PROVIDERS[normalizedModel]
   }

   // If no exact match, use pattern matching as fallback
@@ -35,6 +47,10 @@ export function getProviderFromModel(model: string): ProviderId {
     return 'xai'
   }

+  if (normalizedModel.startsWith('llama')) {
+    return 'cerebras'
+  }
+
   // Default to deepseek for any other models
   return 'deepseek'
 }
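Resolution order after this change: exact match in MODEL_PROVIDERS, then prefix heuristics, then the deepseek default. Illustrative calls, all implied directly by the code above:

  // getProviderFromModel('llama-3.3-70b') -> 'cerebras' (exact match in MODEL_PROVIDERS)
  // getProviderFromModel('llama-guard')   -> 'cerebras' (startsWith('llama') fallback)
  // getProviderFromModel('GPT-4o')        -> 'openai'   (input is lowercased first)
  // getProviderFromModel('mystery-model') -> 'deepseek' (default)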
tools/anthropic/chat.ts (deleted)
@@ -1,112 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

export interface ChatParams {
  apiKey: string
  systemPrompt: string
  context?: string
  model?: string
  temperature?: number
  maxTokens?: number
  topP?: number
  stream?: boolean
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
  }
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'anthropic_chat',
  name: 'Anthropic Chat',
  description:
    "Interact with Anthropic's Claude models for advanced language understanding, reasoning, and generation tasks. Supports system prompts, context management, and configurable parameters for response generation.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'Anthropic API key',
    },
    systemPrompt: {
      type: 'string',
      required: true,
      description: 'System prompt to send to the model',
    },
    context: {
      type: 'string',
      description: 'User message/context to send to the model',
    },
    model: {
      type: 'string',
      default: 'claude-3-7-sonnet-20250219',
      description: 'Model to use',
    },
    temperature: {
      type: 'number',
      default: 0.7,
      description: 'Controls randomness in the response',
    },
    maxTokens: {
      type: 'number',
      default: 4096,
      description: 'Maximum number of tokens to generate',
    },
  },

  request: {
    url: 'https://api.anthropic.com/v1/messages',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      'x-api-key': params.apiKey,
      'anthropic-version': '2023-06-01',
    }),
    body: (params) => {
      const messages = []

      // Add user message if context is provided
      if (params.context) {
        messages.push({
          role: 'user',
          content: params.context,
        })
      }

      return {
        model: params.model || 'claude-3-7-sonnet-20250219',
        messages,
        system: params.systemPrompt,
        temperature: params.temperature || 0.7,
        max_tokens: params.maxTokens || 4096,
      }
    },
  },

  transformResponse: async (response: Response) => {
    const data = await response.json()

    if (!data.content) {
      throw new Error('Unable to extract content from Anthropic API response')
    }

    return {
      success: true,
      output: {
        content: data.content[0].text,
        model: data.model,
        tokens: data.usage?.input_tokens + data.usage?.output_tokens,
      },
    }
  },

  transformError: (error) => {
    const message = error.error?.message || error.message
    const code = error.error?.type || error.code
    return `${message} (${code})`
  },
}
tools/deepseek/chat.ts (deleted)
@@ -1,126 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

interface Message {
  role: 'system' | 'user' | 'assistant'
  content: string
}

export interface ChatParams {
  apiKey: string
  systemPrompt?: string
  context?: string
  model?: string
  temperature?: number
  responseFormat?: string
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
  }
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'deepseek_chat',
  name: 'DeepSeek Chat',
  description:
    "Interact with DeepSeek's advanced language models optimized for code understanding and generation. Supports system prompts, context-aware responses, and configurable output formats.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'DeepSeek API key',
    },
    systemPrompt: {
      type: 'string',
      required: false,
      description: 'System prompt to guide the model',
    },
    context: {
      type: 'string',
      required: false,
      description: 'User input context',
    },
    model: {
      type: 'string',
      default: 'deepseek-chat',
      description: 'Model to use',
    },
    temperature: {
      type: 'number',
      required: false,
      default: 0.7,
      description: 'Sampling temperature',
    },
    responseFormat: {
      type: 'string',
      required: false,
      description: 'Response format specification',
    },
  },

  request: {
    url: 'https://api.deepseek.com/v1/chat/completions',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      Authorization: `Bearer ${params.apiKey}`,
    }),
    body: (params) => {
      const messages: Message[] = []

      if (params.systemPrompt) {
        messages.push({
          role: 'system',
          content: params.systemPrompt,
        })
      }

      if (params.context) {
        messages.push({
          role: 'user',
          content: params.context,
        })
      }

      const body: any = {
        model: 'deepseek-chat',
        messages,
        temperature: params.temperature,
      }

      if (params.responseFormat === 'json') {
        body.response_format = { type: 'json_object' }
      }

      return body
    },
  },

  async transformResponse(response: Response): Promise<ChatResponse> {
    if (!response.ok) {
      const error = await response.json()
      throw new Error(`DeepSeek API error: ${error.message || response.statusText}`)
    }

    const data = await response.json()
    return {
      success: true,
      output: {
        content: data.choices[0].message.content,
        model: data.model,
        tokens: data.usage?.total_tokens,
      },
    }
  },

  transformError(error: any): string {
    const message = error.error?.message || error.message
    const code = error.error?.type || error.code
    return `${message} (${code})`
  },
}
tools/deepseek/reasoner.ts (deleted)
@@ -1,119 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

interface Message {
  role: 'system' | 'user' | 'assistant'
  content: string
}

export interface ChatParams {
  apiKey: string
  systemPrompt?: string
  context?: string
  model?: string
  temperature?: number
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
  }
}

export const reasonerTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'deepseek_reasoner',
  name: 'DeepSeek Reasoner',
  description:
    "Leverage DeepSeek's specialized reasoning model for complex problem-solving, logical analysis, and step-by-step deduction. Optimized for tasks requiring structured thinking and detailed explanations.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'DeepSeek API key',
    },
    systemPrompt: {
      type: 'string',
      required: false,
      description: 'System prompt to guide the model',
    },
    context: {
      type: 'string',
      required: false,
      description: 'User input context',
    },
    model: {
      type: 'string',
      default: 'deepseek-reasoner',
      description: 'Model to use',
    },
    temperature: {
      type: 'number',
      required: false,
      description: 'Temperature (has no effect on reasoner)',
    },
  },

  request: {
    url: 'https://api.deepseek.com/v1/chat/completions',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      Authorization: `Bearer ${params.apiKey}`,
    }),
    body: (params) => {
      const messages: Message[] = []

      if (params.systemPrompt) {
        messages.push({
          role: 'system',
          content: params.systemPrompt,
        })
      }

      // Always ensure the last message is a user message
      if (params.context) {
        messages.push({
          role: 'user',
          content: params.context,
        })
      } else if (params.systemPrompt) {
        // If we have a system prompt but no context, add an empty user message
        messages.push({
          role: 'user',
          content: 'Please respond.',
        })
      }

      return {
        model: 'deepseek-reasoner',
        messages,
      }
    },
  },

  async transformResponse(response: Response): Promise<ChatResponse> {
    if (!response.ok) {
      const error = await response.json()
      throw new Error(`DeepSeek API error: ${error.message || response.statusText}`)
    }

    const data = await response.json()
    return {
      success: true,
      output: {
        content: data.choices[0].message.content,
        model: data.model,
        tokens: data.usage?.total_tokens,
      },
    }
  },

  transformError(error: any): string {
    const message = error.error?.message || error.message
    const code = error.error?.type || error.code
    return `${message} (${code})`
  },
}
tools/google/chat.ts (deleted)
@@ -1,110 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

export interface ChatParams {
  apiKey: string
  systemPrompt: string
  context?: string
  model?: string
  temperature?: number
  maxTokens?: number
  topP?: number
  topK?: number
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
    safetyRatings?: any[]
  }
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'google_chat',
  name: 'Google Chat',
  description:
    "Interact with Google's Gemini models for advanced language tasks with built-in safety ratings. Supports system prompts, context management, and fine-tuned generation parameters including top-k and top-p sampling.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'Google API key',
    },
    systemPrompt: {
      type: 'string',
      required: true,
      description: 'System prompt to send to the model',
    },
    context: {
      type: 'string',
      description: 'User message/context to send to the model',
    },
    model: {
      type: 'string',
      default: 'gemini-2.0-flash-001',
      description: 'Model to use',
    },
    temperature: {
      type: 'number',
      default: 0.7,
      description: 'Controls randomness in the response',
    },
  },

  request: {
    url: 'https://generativelanguage.googleapis.com/v1/models/gemini-2.0-flash-001:generateContent',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      'x-goog-api-key': params.apiKey,
    }),
    body: (params) => {
      const contents = [
        {
          role: 'model',
          parts: [{ text: params.systemPrompt }],
        },
      ]

      if (params.context) {
        contents.push({
          role: 'user',
          parts: [{ text: params.context }],
        })
      }

      const body = {
        contents,
        generationConfig: {
          temperature: params.temperature,
          maxOutputTokens: params.maxTokens,
          topP: params.topP,
          topK: params.topK,
        },
      }
      return body
    },
  },

  transformResponse: async (response: Response) => {
    const data = await response.json()
    return {
      success: true,
      output: {
        content: data.candidates[0].content.parts[0].text,
        model: data.model,
        tokens: data.usage?.totalTokens,
        safetyRatings: data.candidates[0].safetyRatings,
      },
    }
  },

  transformError: (error) => {
    const message = error.error?.message || error.message
    const code = error.error?.status || error.code
    return `${message} (${code})`
  },
}
@@ -1,7 +1,4 @@
-import { chatTool as anthropicChat } from './anthropic/chat'
 import { visionTool as crewAIVision } from './crewai/vision'
-import { chatTool as deepseekChat } from './deepseek/chat'
-import { reasonerTool as deepseekReasoner } from './deepseek/reasoner'
 import { scrapeTool } from './firecrawl/scrape'
 import { functionExecuteTool as functionExecute } from './function/execute'
 import { commentTool } from './github/comment'
@@ -10,13 +7,11 @@ import { repoInfoTool } from './github/repo'
 import { gmailReadTool } from './gmail/read'
 import { gmailSearchTool } from './gmail/search'
 import { gmailSendTool } from './gmail/send'
-import { chatTool as googleChat } from './google/chat'
 import { requestTool as httpRequest } from './http/request'
 import { contactsTool as hubspotContacts } from './hubspot/contacts'
 import { readUrlTool } from './jina/reader'
 import { notionReadTool } from './notion/read'
 import { notionWriteTool } from './notion/write'
-import { chatTool as openAIChat } from './openai/chat'
 import { embeddingsTool as openAIEmbeddings } from './openai/embeddings'
 import { fetchTool as pineconeFetchTool } from './pinecone/fetch'
 import { generateEmbeddingsTool as pineconeGenerateEmbeddingsTool } from './pinecone/generate'
@@ -33,18 +28,11 @@ import { readTool as xRead } from './x/read'
 import { searchTool as xSearch } from './x/search'
 import { userTool as xUser } from './x/user'
 import { writeTool as xWrite } from './x/write'
-import { chatTool as xaiChat } from './xai/chat'
 import { youtubeSearchTool } from './youtube/search'

 // Registry of all available tools
 export const tools: Record<string, ToolConfig> = {
-  openai_chat: openAIChat,
   openai_embeddings: openAIEmbeddings,
-  anthropic_chat: anthropicChat,
-  google_chat: googleChat,
-  xai_chat: xaiChat,
-  deepseek_chat: deepseekChat,
-  deepseek_reasoner: deepseekReasoner,
   http_request: httpRequest,
   hubspot_contacts: hubspotContacts,
   salesforce_opportunities: salesforceOpportunities,
tools/openai/chat.ts (deleted)
@@ -1,135 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

interface ChatParams {
  apiKey: string
  systemPrompt: string
  context?: string
  model?: string
  temperature?: number
  maxTokens?: number
  maxCompletionTokens?: number
  topP?: number
  frequencyPenalty?: number
  presencePenalty?: number
  stream?: boolean
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
    reasoning_tokens?: number
  }
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'openai_chat',
  name: 'OpenAI Chat',
  description:
    "Interact with OpenAI's GPT models for natural language processing and generation. Supports system prompts, context management, and fine-tuned response parameters including temperature and token control.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'OpenAI API key',
    },
    systemPrompt: {
      type: 'string',
      required: true,
      description: 'System prompt to send to the model',
    },
    context: {
      type: 'string',
      description: 'User message/context to send to the model',
    },
    model: {
      type: 'string',
      default: 'gpt-4o',
      description: 'Model to use (gpt-4o, o1, o1-mini)',
    },
    temperature: {
      type: 'number',
      default: 0.7,
      description: 'Controls randomness in the response (not supported by o1 models)',
    },
    maxCompletionTokens: {
      type: 'number',
      description:
        'Maximum number of tokens to generate (including reasoning tokens) for o1 models',
    },
  },

  request: {
    url: 'https://api.openai.com/v1/chat/completions',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      Authorization: `Bearer ${params.apiKey}`,
    }),
    body: (params) => {
      const isO1Model = params.model?.startsWith('o1')
      const messages = []

      // For o1-mini, we need to use 'user' role instead of 'system'
      if (params.model === 'o1-mini') {
        messages.push({ role: 'user', content: params.systemPrompt })
      } else {
        messages.push({ role: 'system', content: params.systemPrompt })
      }

      if (params.context) {
        messages.push({ role: 'user', content: params.context })
      }

      const body: any = {
        model: params.model || 'gpt-4o',
        messages,
      }

      // Only add parameters supported by the model type
      if (!isO1Model) {
        body.temperature = params.temperature
        body.max_tokens = params.maxTokens
        body.top_p = params.topP
        body.frequency_penalty = params.frequencyPenalty
        body.presence_penalty = params.presencePenalty
      } else if (params.maxCompletionTokens) {
        body.max_completion_tokens = params.maxCompletionTokens
      }

      body.stream = params.stream
      return body
    },
  },

  transformResponse: async (response: Response) => {
    const data = await response.json()
    if (data.choices?.[0]?.delta?.content) {
      return {
        success: true,
        output: {
          content: data.choices[0].delta.content,
          model: data.model,
        },
      }
    }
    return {
      success: true,
      output: {
        content: data.choices[0].message.content,
        model: data.model,
        tokens: data.usage?.total_tokens,
        reasoning_tokens: data.usage?.completion_tokens_details?.reasoning_tokens,
      },
    }
  },

  transformError: (error) => {
    const message = error.error?.message || error.message
    const code = error.error?.type || error.code
    return `${message} (${code})`
  },
}
tools/xai/chat.ts (deleted)
@@ -1,103 +0,0 @@
import { ToolConfig, ToolResponse } from '../types'

export interface ChatParams {
  apiKey: string
  systemPrompt: string
  context?: string
  model?: string
  temperature?: number
  maxTokens?: number
  topP?: number
  frequencyPenalty?: number
  presencePenalty?: number
}

export interface ChatResponse extends ToolResponse {
  output: {
    content: string
    model: string
    tokens?: number
    reasoning?: string
  }
}

export const chatTool: ToolConfig<ChatParams, ChatResponse> = {
  id: 'xai_chat',
  name: 'xAI Chat',
  description:
    "Interact with xAI's Grok models featuring advanced reasoning capabilities. Supports system prompts, context management, and provides detailed reasoning paths alongside responses.",
  version: '1.0.0',

  params: {
    apiKey: {
      type: 'string',
      required: true,
      description: 'xAI API key',
    },
    systemPrompt: {
      type: 'string',
      required: true,
      description: 'System prompt to send to the model',
    },
    context: {
      type: 'string',
      description: 'User message/context to send to the model',
    },
    model: {
      type: 'string',
      default: 'grok-2-latest',
      description: 'Model to use',
    },
    temperature: {
      type: 'number',
      default: 0.7,
      description: 'Controls randomness in the response',
    },
  },

  request: {
    url: 'https://api.x.ai/v1/chat/completions',
    method: 'POST',
    headers: (params) => ({
      'Content-Type': 'application/json',
      Authorization: `Bearer ${params.apiKey}`,
    }),
    body: (params) => {
      const messages = [{ role: 'system', content: params.systemPrompt }]

      if (params.context) {
        messages.push({ role: 'user', content: params.context })
      }

      const body = {
        model: params.model || 'grok-2-latest',
        messages,
        temperature: params.temperature,
        max_tokens: params.maxTokens,
        top_p: params.topP,
        frequency_penalty: params.frequencyPenalty,
        presence_penalty: params.presencePenalty,
      }
      return body
    },
  },

  transformResponse: async (response: Response) => {
    const data = await response.json()
    return {
      success: true,
      output: {
        content: data.choices[0].message.content,
        model: data.model,
        tokens: data.usage?.total_tokens,
        reasoning: data.choices[0]?.reasoning,
      },
    }
  },

  transformError: (error) => {
    const message = error.error?.message || error.message
    const code = error.error?.type || error.code
    return `${message} (${code})`
  },
}