feat(models): add Llama 4 Scout model to Groq provider

This commit is contained in:
Waleed Latif
2025-04-06 01:58:35 -07:00
parent d728ae6712
commit 47ad2bae85
3 changed files with 13 additions and 1 deletion

View File

@@ -10,7 +10,12 @@ export const groqProvider: ProviderConfig = {
name: 'Groq',
description: "Groq's LLM models with high-performance inference",
version: '1.0.0',
models: ['groq/llama-3.3-70b-specdec', 'groq/deepseek-r1-distill-llama-70b', 'groq/qwen-2.5-32b'],
models: [
'groq/llama-3.3-70b-specdec',
'groq/meta-llama/llama-4-scout-17b-16e-instruct',
'groq/deepseek-r1-distill-llama-70b',
'groq/qwen-2.5-32b',
],
defaultModel: 'groq/llama-3.3-70b-specdec',
executeRequest: async (request: ProviderRequest): Promise<ProviderResponse> => {

View File

@@ -92,6 +92,12 @@ const modelPricing: ModelPricingMap = {
output: 0.99,
updatedAt: '2025-03-21',
},
'groq/meta-llama/llama-4-scout-17b-16e-instruct': {
input: 0.4,
cachedInput: 0.2,
output: 0.6,
updatedAt: '2025-04-06',
},
'groq/deepseek-r1-distill-llama-70b': {
input: 0.75,
cachedInput: 0.38,

View File

@@ -56,6 +56,7 @@ export const providers: Record<
groq: {
...groqProvider,
models: [
'groq/meta-llama/llama-4-scout-17b-16e-instruct',
'groq/llama-3.3-70b-specdec',
'groq/deepseek-r1-distill-llama-70b',
'groq/qwen-2.5-32b',