mirror of
https://github.com/simstudioai/sim.git
synced 2026-01-07 22:24:06 -05:00
Added model service under tools directory for LLM calls, added unit testing framework jest
This commit is contained in:
14
jest.config.js
Normal file
14
jest.config.js
Normal file
@@ -0,0 +1,14 @@
|
||||
/** @type {import('ts-jest').JestConfigWithTsJest} */
|
||||
module.exports = {
|
||||
preset: 'ts-jest',
|
||||
testEnvironment: 'node',
|
||||
moduleNameMapper: {
|
||||
'^@/(.*)$': '<rootDir>/$1'
|
||||
},
|
||||
testMatch: ['**/__tests__/**/*.test.ts'],
|
||||
transform: {
|
||||
'^.+\\.tsx?$': ['ts-jest', {
|
||||
tsconfig: 'tsconfig.json'
|
||||
}]
|
||||
}
|
||||
};
|
||||
3660
package-lock.json
generated
3660
package-lock.json
generated
File diff suppressed because it is too large
Load Diff
10
package.json
10
package.json
@@ -6,7 +6,9 @@
|
||||
"dev": "next dev --turbopack",
|
||||
"build": "next build",
|
||||
"start": "next start",
|
||||
"lint": "next lint"
|
||||
"lint": "next lint",
|
||||
"test": "jest",
|
||||
"test:watch": "jest --watch"
|
||||
},
|
||||
"dependencies": {
|
||||
"@radix-ui/react-dialog": "^1.1.4",
|
||||
@@ -30,11 +32,15 @@
|
||||
"tailwindcss-animate": "^1.0.7"
|
||||
},
|
||||
"devDependencies": {
|
||||
"@jest/globals": "^29.7.0",
|
||||
"@types/jest": "^29.5.14",
|
||||
"@types/node": "^20",
|
||||
"@types/react": "^19",
|
||||
"@types/react-dom": "^19",
|
||||
"jest": "^29.7.0",
|
||||
"postcss": "^8",
|
||||
"tailwindcss": "^3.4.1",
|
||||
"typescript": "^5"
|
||||
"ts-jest": "^29.2.5",
|
||||
"typescript": "^5.7.3"
|
||||
}
|
||||
}
|
||||
|
||||
201
tools/model-service/__tests__/providers.test.ts
Normal file
201
tools/model-service/__tests__/providers.test.ts
Normal file
@@ -0,0 +1,201 @@
|
||||
import { describe, expect, test, jest, beforeEach } from '@jest/globals';
|
||||
import { OpenAIProvider } from '../providers/openai';
|
||||
import { AnthropicProvider } from '../providers/anthropic';
|
||||
import { GoogleProvider } from '../providers/google';
|
||||
import { XAIProvider } from '../providers/xai';
|
||||
import { AgentConfig } from '../types/agent';
|
||||
import { ModelRequestOptions } from '../types/model';
|
||||
|
||||
// Setup fetch mock
|
||||
const mockFetch = jest.fn() as jest.MockedFunction<typeof fetch>;
|
||||
global.fetch = mockFetch;
|
||||
|
||||
describe('Model Providers', () => {
|
||||
beforeEach(() => {
|
||||
jest.clearAllMocks();
|
||||
});
|
||||
|
||||
describe('OpenAI Provider', () => {
|
||||
const provider = new OpenAIProvider();
|
||||
|
||||
test('should call OpenAI API successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [{ message: { content: 'Test response' } }],
|
||||
usage: { total_tokens: 10 }
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse)
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'gpt-4o',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'test-key'
|
||||
};
|
||||
|
||||
const result = await provider.callModel(config, { maxTokens: 100 });
|
||||
expect(result.response).toBe('Test response');
|
||||
expect(result.tokens).toBe(10);
|
||||
expect(result.model).toBe('gpt-4o');
|
||||
});
|
||||
|
||||
test('should handle API errors', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
json: () => Promise.resolve({ error: { message: 'API Error' } })
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'gpt-4o',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'invalid-key'
|
||||
};
|
||||
|
||||
await expect(provider.callModel(config, {})).rejects.toThrow('API Error');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Anthropic Provider', () => {
|
||||
const provider = new AnthropicProvider();
|
||||
|
||||
test('should call Anthropic API successfully', async () => {
|
||||
const mockResponse = {
|
||||
content: [{ text: 'Test response' }],
|
||||
usage: { input_tokens: 5, output_tokens: 5 }
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse)
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'claude-3.5-sonnet',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'test-key'
|
||||
};
|
||||
|
||||
const result = await provider.callModel(config, { maxTokens: 100 });
|
||||
expect(result.response).toBe('Test response');
|
||||
expect(result.tokens).toBe(10);
|
||||
expect(result.model).toBe('claude-3.5-sonnet');
|
||||
});
|
||||
|
||||
test('should handle API errors', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
json: () => Promise.resolve({ error: { message: 'API Error' } })
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'claude-3.5-sonnet',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'invalid-key'
|
||||
};
|
||||
|
||||
await expect(provider.callModel(config, {})).rejects.toThrow('API Error');
|
||||
});
|
||||
});
|
||||
|
||||
describe('Google Provider', () => {
|
||||
const provider = new GoogleProvider();
|
||||
|
||||
test('should call Google API successfully', async () => {
|
||||
const mockResponse = {
|
||||
candidates: [{
|
||||
content: {
|
||||
parts: [{ text: 'Test response' }]
|
||||
}
|
||||
}],
|
||||
usage: { totalTokens: 10 }
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse)
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'gemini-2-flash',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'test-key'
|
||||
};
|
||||
|
||||
const result = await provider.callModel(config, { maxTokens: 100 });
|
||||
expect(result.response).toBe('Test response');
|
||||
expect(result.tokens).toBe(10);
|
||||
expect(result.model).toBe('gemini-2-flash');
|
||||
});
|
||||
|
||||
test('should handle API errors', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
json: () => Promise.resolve({ error: { message: 'API Error' } })
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'gemini-2-flash',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'invalid-key'
|
||||
};
|
||||
|
||||
await expect(provider.callModel(config, {})).rejects.toThrow('API Error');
|
||||
});
|
||||
});
|
||||
|
||||
describe('XAI Provider', () => {
|
||||
const provider = new XAIProvider();
|
||||
|
||||
test('should call XAI API successfully', async () => {
|
||||
const mockResponse = {
|
||||
choices: [{
|
||||
message: {
|
||||
content: 'Test response'
|
||||
}
|
||||
}],
|
||||
usage: { total_tokens: 10 }
|
||||
};
|
||||
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: true,
|
||||
json: () => Promise.resolve(mockResponse)
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'grok-2-latest',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'test-key'
|
||||
};
|
||||
|
||||
const result = await provider.callModel(config, { maxTokens: 100 });
|
||||
expect(result.response).toBe('Test response');
|
||||
expect(result.tokens).toBe(10);
|
||||
expect(result.model).toBe('grok-2-latest');
|
||||
});
|
||||
|
||||
test('should handle API errors', async () => {
|
||||
mockFetch.mockResolvedValueOnce({
|
||||
ok: false,
|
||||
json: () => Promise.resolve({ error: { message: 'API Error' } })
|
||||
} as Response);
|
||||
|
||||
const config: AgentConfig = {
|
||||
model: 'grok-2-latest',
|
||||
systemPrompt: 'Test prompt',
|
||||
temperature: 0.7,
|
||||
apiKey: 'invalid-key'
|
||||
};
|
||||
|
||||
await expect(provider.callModel(config, {})).rejects.toThrow('API Error');
|
||||
});
|
||||
});
|
||||
});
|
||||
62
tools/model-service/index.ts
Normal file
62
tools/model-service/index.ts
Normal file
@@ -0,0 +1,62 @@
|
||||
import { AgentConfig } from '@/app/w/core/types/runner';
|
||||
import { ModelProvider, ModelRequestOptions, ModelResponse } from './types/model';
|
||||
import { OpenAIProvider } from './providers/openai';
|
||||
import { AnthropicProvider } from './providers/anthropic';
|
||||
import { GoogleProvider } from './providers/google';
|
||||
import { XAIProvider } from './providers/xai';
|
||||
|
||||
export class ModelService {
|
||||
private static instance: ModelService;
|
||||
private providers: Map<string, ModelProvider>;
|
||||
|
||||
private constructor() {
|
||||
this.providers = new Map();
|
||||
this.initializeProviders();
|
||||
}
|
||||
|
||||
public static getInstance(): ModelService {
|
||||
if (!ModelService.instance) {
|
||||
ModelService.instance = new ModelService();
|
||||
}
|
||||
return ModelService.instance;
|
||||
}
|
||||
|
||||
private initializeProviders() {
|
||||
const openai = new OpenAIProvider();
|
||||
const anthropic = new AnthropicProvider();
|
||||
const google = new GoogleProvider();
|
||||
const xai = new XAIProvider();
|
||||
// OpenAI models
|
||||
this.providers.set('gpt-4o', openai);
|
||||
|
||||
// Anthropic models
|
||||
this.providers.set('claude-3.5-sonnet', anthropic);
|
||||
|
||||
// Google models
|
||||
this.providers.set('gemini-2-flash', google);
|
||||
|
||||
// XAI models
|
||||
this.providers.set('grok-2-latest', xai);
|
||||
}
|
||||
|
||||
public async callModel(config: AgentConfig, options: ModelRequestOptions = {}): Promise<ModelResponse> {
|
||||
const provider = this.providers.get(config.model);
|
||||
if (!provider) {
|
||||
throw new Error(`No provider found for model: ${config.model}`);
|
||||
}
|
||||
|
||||
await provider.validateConfig(config);
|
||||
return provider.callModel(config, options);
|
||||
}
|
||||
|
||||
public setApiKey(provider: string, apiKey: string): void {
|
||||
// Store API keys securely (in memory for now)
|
||||
// TODO: Implement secure storage
|
||||
}
|
||||
|
||||
public getApiKey(provider: string): string | null {
|
||||
// Retrieve API key
|
||||
// TODO: Implement secure retrieval
|
||||
return null;
|
||||
}
|
||||
}
|
||||
47
tools/model-service/providers/anthropic.ts
Normal file
47
tools/model-service/providers/anthropic.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { AgentConfig } from '../types/agent';
|
||||
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
|
||||
|
||||
export class AnthropicProvider implements ModelProvider {
|
||||
private readonly SUPPORTED_MODELS = ['claude-3-sonnet', 'claude-3-opus'];
|
||||
private readonly API_URL = 'https://api.anthropic.com/v1/messages';
|
||||
|
||||
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {
|
||||
const response = await fetch(this.API_URL, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'x-api-key': config.apiKey,
|
||||
'anthropic-version': '2023-06-01'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: config.model,
|
||||
messages: [
|
||||
{ role: 'user', content: config.systemPrompt + '\n' + (config.prompt || '') }
|
||||
],
|
||||
temperature: config.temperature,
|
||||
max_tokens: options.maxTokens
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.error?.message || 'Anthropic API error');
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return {
|
||||
response: data.content[0].text,
|
||||
tokens: data.usage.input_tokens + data.usage.output_tokens,
|
||||
model: config.model
|
||||
};
|
||||
}
|
||||
|
||||
async validateConfig(config: AgentConfig): Promise<void> {
|
||||
if (!config.apiKey) {
|
||||
throw new Error('Anthropic API key is required');
|
||||
}
|
||||
if (!this.SUPPORTED_MODELS.includes(config.model)) {
|
||||
throw new Error(`Model ${config.model} is not supported. Use one of: ${this.SUPPORTED_MODELS.join(', ')}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
48
tools/model-service/providers/google.ts
Normal file
48
tools/model-service/providers/google.ts
Normal file
@@ -0,0 +1,48 @@
|
||||
import { AgentConfig } from '../types/agent';
|
||||
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
|
||||
|
||||
export class GoogleProvider implements ModelProvider {
|
||||
private readonly SUPPORTED_MODELS = ['gemini-pro'];
|
||||
private readonly API_URL = 'https://generativelanguage.googleapis.com/v1beta/models';
|
||||
|
||||
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {
|
||||
const response = await fetch(`${this.API_URL}/${config.model}:generateContent?key=${config.apiKey}`, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json'
|
||||
},
|
||||
body: JSON.stringify({
|
||||
contents: [{
|
||||
parts: [{
|
||||
text: config.systemPrompt + '\n' + (config.prompt || '')
|
||||
}]
|
||||
}],
|
||||
generationConfig: {
|
||||
temperature: config.temperature,
|
||||
maxOutputTokens: options.maxTokens
|
||||
}
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.error?.message || 'Google API error');
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return {
|
||||
response: data.candidates[0].content.parts[0].text,
|
||||
tokens: data.usage.totalTokens,
|
||||
model: config.model
|
||||
};
|
||||
}
|
||||
|
||||
async validateConfig(config: AgentConfig): Promise<void> {
|
||||
if (!config.apiKey) {
|
||||
throw new Error('Google API key is required');
|
||||
}
|
||||
if (!this.SUPPORTED_MODELS.includes(config.model)) {
|
||||
throw new Error(`Model ${config.model} is not supported. Use one of: ${this.SUPPORTED_MODELS.join(', ')}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
47
tools/model-service/providers/openai.ts
Normal file
47
tools/model-service/providers/openai.ts
Normal file
@@ -0,0 +1,47 @@
|
||||
import { AgentConfig } from '../types/agent';
|
||||
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
|
||||
|
||||
export class OpenAIProvider implements ModelProvider {
|
||||
private readonly SUPPORTED_MODELS = ['gpt-4', 'gpt-3.5-turbo'];
|
||||
private readonly API_URL = 'https://api.openai.com/v1/chat/completions';
|
||||
|
||||
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {
|
||||
const response = await fetch(this.API_URL, {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${config.apiKey}`
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: config.model,
|
||||
messages: [
|
||||
{ role: 'system', content: config.systemPrompt },
|
||||
{ role: 'user', content: config.prompt || '' }
|
||||
],
|
||||
temperature: config.temperature,
|
||||
max_tokens: options.maxTokens,
|
||||
})
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.error?.message || 'OpenAI API error');
|
||||
}
|
||||
|
||||
const data = await response.json();
|
||||
return {
|
||||
response: data.choices[0].message.content,
|
||||
tokens: data.usage.total_tokens,
|
||||
model: config.model
|
||||
};
|
||||
}
|
||||
|
||||
async validateConfig(config: AgentConfig): Promise<void> {
|
||||
if (!config.apiKey) {
|
||||
throw new Error('OpenAI API key is required');
|
||||
}
|
||||
if (!this.SUPPORTED_MODELS.includes(config.model)) {
|
||||
throw new Error(`Model ${config.model} is not supported. Use one of: ${this.SUPPORTED_MODELS.join(', ')}`);
|
||||
}
|
||||
}
|
||||
}
|
||||
50
tools/model-service/providers/xai.ts
Normal file
50
tools/model-service/providers/xai.ts
Normal file
@@ -0,0 +1,50 @@
|
||||
import { AgentConfig } from '../types/agent';
|
||||
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
|
||||
|
||||
export class XAIProvider implements ModelProvider {
|
||||
async callModel(
|
||||
config: AgentConfig,
|
||||
options: ModelRequestOptions
|
||||
): Promise<ModelResponse> {
|
||||
const response = await fetch('https://api.x.ai/v1/chat/completions', {
|
||||
method: 'POST',
|
||||
headers: {
|
||||
'Content-Type': 'application/json',
|
||||
'Authorization': `Bearer ${config.apiKey}`
|
||||
},
|
||||
body: JSON.stringify({
|
||||
model: config.model,
|
||||
messages: [
|
||||
{
|
||||
role: 'system',
|
||||
content: config.systemPrompt
|
||||
}
|
||||
],
|
||||
temperature: config.temperature,
|
||||
max_tokens: options.maxTokens
|
||||
}),
|
||||
signal: AbortSignal.timeout(options.timeout || 10000)
|
||||
});
|
||||
|
||||
if (!response.ok) {
|
||||
const error = await response.json();
|
||||
throw new Error(error.error?.message || 'xAI API call failed');
|
||||
}
|
||||
|
||||
const result = await response.json();
|
||||
return {
|
||||
response: result.choices[0].message.content,
|
||||
tokens: result.usage?.total_tokens || 0,
|
||||
model: config.model
|
||||
};
|
||||
}
|
||||
|
||||
async validateConfig(config: AgentConfig): Promise<void> {
|
||||
if (!config.apiKey) {
|
||||
throw new Error('xAI API key is required');
|
||||
}
|
||||
if (!config.model.startsWith('grok')) {
|
||||
throw new Error('Invalid xAI model specified');
|
||||
}
|
||||
}
|
||||
}
|
||||
17
tools/model-service/types/agent.ts
Normal file
17
tools/model-service/types/agent.ts
Normal file
@@ -0,0 +1,17 @@
|
||||
export interface AgentConfig {
|
||||
model: string;
|
||||
systemPrompt: string;
|
||||
prompt?: string;
|
||||
temperature: number;
|
||||
apiKey: string;
|
||||
}
|
||||
|
||||
export interface AgentResult {
|
||||
success: boolean;
|
||||
data?: {
|
||||
response: string;
|
||||
tokens: number;
|
||||
model: string;
|
||||
};
|
||||
error?: string;
|
||||
}
|
||||
26
tools/model-service/types/model.ts
Normal file
26
tools/model-service/types/model.ts
Normal file
@@ -0,0 +1,26 @@
|
||||
import { AgentConfig } from "./agent";
|
||||
|
||||
export interface ModelResponse {
|
||||
response: string;
|
||||
tokens: number;
|
||||
model: string;
|
||||
}
|
||||
|
||||
export interface ModelRequestOptions {
|
||||
maxTokens?: number;
|
||||
timeout?: number;
|
||||
}
|
||||
|
||||
export interface ModelProvider {
|
||||
callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse>;
|
||||
validateConfig(config: AgentConfig): Promise<void>;
|
||||
}
|
||||
|
||||
export const DEFAULT_MODEL_CONFIGS = {
|
||||
'gpt-4': { provider: 'openai' },
|
||||
'gpt-3.5-turbo': { provider: 'openai' },
|
||||
'claude-3-sonnet': { provider: 'anthropic' },
|
||||
'claude-3-opus': { provider: 'anthropic' },
|
||||
'gemini-pro': { provider: 'google' },
|
||||
'grok-2-latest': { provider: 'xai' }
|
||||
} as const;
|
||||
@@ -1,27 +1,22 @@
|
||||
{
|
||||
"compilerOptions": {
|
||||
"target": "ES2017",
|
||||
"lib": ["dom", "dom.iterable", "esnext"],
|
||||
"allowJs": true,
|
||||
"skipLibCheck": true,
|
||||
"target": "es2020",
|
||||
"module": "commonjs",
|
||||
"lib": ["es2020", "dom"],
|
||||
"strict": true,
|
||||
"noEmit": true,
|
||||
"esModuleInterop": true,
|
||||
"module": "esnext",
|
||||
"moduleResolution": "bundler",
|
||||
"resolveJsonModule": true,
|
||||
"isolatedModules": true,
|
||||
"jsx": "preserve",
|
||||
"incremental": true,
|
||||
"plugins": [
|
||||
{
|
||||
"name": "next"
|
||||
}
|
||||
],
|
||||
"skipLibCheck": true,
|
||||
"forceConsistentCasingInFileNames": true,
|
||||
"baseUrl": ".",
|
||||
"paths": {
|
||||
"@/*": ["./*"]
|
||||
}
|
||||
},
|
||||
"include": ["next-env.d.ts", "**/*.ts", "**/*.tsx", ".next/types/**/*.ts"],
|
||||
"exclude": ["node_modules"]
|
||||
"include": [
|
||||
"**/*.ts",
|
||||
"**/*.tsx"
|
||||
],
|
||||
"exclude": [
|
||||
"node_modules"
|
||||
]
|
||||
}
|
||||
|
||||
Reference in New Issue
Block a user