Update providers to the latest model identifiers; verified in the frontend by running the agent block

This commit is contained in:
Waleed Latif
2025-01-16 18:13:33 -08:00
parent 1287fd3311
commit 49fbf801a4
7 changed files with 20 additions and 21 deletions

View File

@@ -41,7 +41,7 @@ export const AgentBlock: BlockConfig = {
title: 'Model',
type: 'dropdown',
layout: 'half',
options: ['GPT-4o', 'Gemini 2.0', 'Claude 3.5 Sonnet', 'DeepSeek V3', 'Grok 2'],
options: ['gpt-4o', 'gemini-pro', 'claude-3-5-sonnet-20241022', 'grok-2-latest', 'deepseek-v3'],
},
{
id: 'temperature',

View File

@@ -74,7 +74,7 @@ describe('Model Providers', () => {
} as Response);
const config: AgentConfig = {
model: 'claude-3.5-sonnet',
model: 'claude-3-5-sonnet-20241022',
systemPrompt: 'Test prompt',
temperature: 0.7,
apiKey: 'test-key'
@@ -83,7 +83,7 @@ describe('Model Providers', () => {
const result = await provider.callModel(config, { maxTokens: 100 });
expect(result.response).toBe('Test response');
expect(result.tokens).toBe(10);
expect(result.model).toBe('claude-3.5-sonnet');
expect(result.model).toBe('claude-3-5-sonnet-20241022');
});
test('should handle API errors', async () => {
@@ -93,7 +93,7 @@ describe('Model Providers', () => {
} as Response);
const config: AgentConfig = {
model: 'claude-3.5-sonnet',
model: 'claude-3-5-sonnet-20241022',
systemPrompt: 'Test prompt',
temperature: 0.7,
apiKey: 'invalid-key'
@@ -122,7 +122,7 @@ describe('Model Providers', () => {
} as Response);
const config: AgentConfig = {
model: 'gemini-2-flash',
model: 'gemini-pro',
systemPrompt: 'Test prompt',
temperature: 0.7,
apiKey: 'test-key'
@@ -131,7 +131,7 @@ describe('Model Providers', () => {
const result = await provider.callModel(config, { maxTokens: 100 });
expect(result.response).toBe('Test response');
expect(result.tokens).toBe(10);
expect(result.model).toBe('gemini-2-flash');
expect(result.model).toBe('gemini-pro');
});
test('should handle API errors', async () => {
@@ -141,7 +141,7 @@ describe('Model Providers', () => {
} as Response);
const config: AgentConfig = {
model: 'gemini-2-flash',
model: 'gemini-pro',
systemPrompt: 'Test prompt',
temperature: 0.7,
apiKey: 'invalid-key'

View File

@@ -30,10 +30,10 @@ export class ModelService {
this.providers.set('gpt-4o', openai);
// Anthropic models
this.providers.set('claude-3.5-sonnet', anthropic);
this.providers.set('claude-3-5-sonnet-20241022', anthropic);
// Google models
this.providers.set('gemini-2-flash', google);
this.providers.set('gemini-pro', google);
// XAI models
this.providers.set('grok-2-latest', xai);

View File

@@ -2,7 +2,7 @@ import { AgentConfig } from '../types/agent';
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
export class AnthropicProvider implements ModelProvider {
private readonly SUPPORTED_MODELS = ['claude-3-sonnet', 'claude-3-opus'];
private readonly SUPPORTED_MODELS = ['claude-3-5-sonnet-20241022'];
private readonly API_URL = 'https://api.anthropic.com/v1/messages';
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {

View File

@@ -3,10 +3,10 @@ import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/mode
export class GoogleProvider implements ModelProvider {
private readonly SUPPORTED_MODELS = ['gemini-pro'];
private readonly API_URL = 'https://generativelanguage.googleapis.com/v1beta/models';
private readonly API_URL = 'https://generativelanguage.googleapis.com/v1beta/models/gemini-pro:generateContent';
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {
const response = await fetch(`${this.API_URL}/${config.model}:generateContent?key=${config.apiKey}`, {
const response = await fetch(`${this.API_URL}?key=${config.apiKey}`, {
method: 'POST',
headers: {
'Content-Type': 'application/json'
@@ -32,7 +32,7 @@ export class GoogleProvider implements ModelProvider {
const data = await response.json();
return {
response: data.candidates[0].content.parts[0].text,
tokens: data.usage.totalTokens,
tokens: data.usage?.totalTokens || 0,
model: config.model
};
}

View File

@@ -2,7 +2,7 @@ import { AgentConfig } from '../types/agent';
import { ModelProvider, ModelRequestOptions, ModelResponse } from '../types/model';
export class OpenAIProvider implements ModelProvider {
private readonly SUPPORTED_MODELS = ['gpt-4', 'gpt-3.5-turbo'];
private readonly SUPPORTED_MODELS = ['gpt-4o'];
private readonly API_URL = 'https://api.openai.com/v1/chat/completions';
async callModel(config: AgentConfig, options: ModelRequestOptions): Promise<ModelResponse> {

View File

@@ -17,10 +17,9 @@ export interface ModelResponse {
}
export const DEFAULT_MODEL_CONFIGS = {
'gpt-4': { provider: 'openai' },
'gpt-3.5-turbo': { provider: 'openai' },
'claude-3-sonnet': { provider: 'anthropic' },
'claude-3-opus': { provider: 'anthropic' },
'gemini-pro': { provider: 'google' },
'grok-2-latest': { provider: 'xai' }
} as const;
'gpt-4o': { provider: 'openai' },
'claude': { provider: 'anthropic' },
'gemini': { provider: 'google' },
'grok': { provider: 'xai' },
'deepseek': { provider: 'deepseek' }
} as const;