diff --git a/README.md b/README.md
index 1ef3fe8..74c4229 100644
--- a/README.md
+++ b/README.md
@@ -106,7 +106,7 @@ Create a `.env` file and add OpenCommit config variables there like this:
 ```env
 ...
-OCO_AI_PROVIDER=
+OCO_AI_PROVIDER=
 OCO_API_KEY= // or other LLM provider API token
 OCO_API_URL=
 OCO_TOKENS_MAX_INPUT=
diff --git a/src/commands/config.ts b/src/commands/config.ts
index 5381a8d..ea7e370 100644
--- a/src/commands/config.ts
+++ b/src/commands/config.ts
@@ -128,6 +128,10 @@ export const MODEL_LIST = {
     'mistral-embed',
     'mistral-moderation-2411',
     'mistral-moderation-latest',
+  ],
+  deepseek: [
+    'deepseek-chat',
+    'deepseek-reasoner',
   ]
 };
 
@@ -145,6 +149,8 @@ const getDefaultModel = (provider: string | undefined): string => {
       return MODEL_LIST.groq[0];
     case 'mistral':
       return MODEL_LIST.mistral[0];
+    case 'deepseek':
+      return MODEL_LIST.deepseek[0];
     default:
       return MODEL_LIST.openai[0];
   }
@@ -184,7 +190,7 @@ export const configValidators = {
     validateConfig(
       'OCO_API_KEY',
       value,
-      'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
+      'You need to provide the OCO_API_KEY when OCO_AI_PROVIDER is set to "openai" (default) or "ollama" or "mlx" or "azure" or "gemini" or "flowise" or "anthropic" or "deepseek". Run `oco config set OCO_API_KEY=your_key OCO_AI_PROVIDER=openai`'
     );
 
     return value;
@@ -307,9 +313,10 @@ export const configValidators = {
         'azure',
         'test',
         'flowise',
-        'groq'
+        'groq',
+        'deepseek'
       ].includes(value) || value.startsWith('ollama'),
-      `${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral' or 'openai' (default)`
+      `${value} is not supported yet, use 'ollama', 'mlx', 'anthropic', 'azure', 'gemini', 'flowise', 'mistral', 'deepseek' or 'openai' (default)`
     );
 
     return value;
@@ -356,7 +363,8 @@ export enum OCO_AI_PROVIDER_ENUM {
   FLOWISE = 'flowise',
   GROQ = 'groq',
   MISTRAL = 'mistral',
-  MLX = 'mlx'
+  MLX = 'mlx',
+  DEEPSEEK = 'deepseek'
 }
 
 export type ConfigType = {
diff --git a/src/engine/deepseek.ts b/src/engine/deepseek.ts
new file mode 100644
index 0000000..9424a9a
--- /dev/null
+++ b/src/engine/deepseek.ts
@@ -0,0 +1,60 @@
+import axios from 'axios';
+import { OpenAI } from 'openai';
+import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { tokenCount } from '../utils/tokenCount';
+import { OpenAiEngine, OpenAiConfig } from './openAI';
+
+export interface DeepseekConfig extends OpenAiConfig {}
+
+export class DeepseekEngine extends OpenAiEngine {
+  constructor(config: DeepseekConfig) {
+    // Call the OpenAiEngine constructor with the baseURL forced to the DeepSeek API
+    super({
+      ...config,
+      baseURL: 'https://api.deepseek.com/v1'
+    });
+  }
+
+  // Identical to OpenAiEngine.generateCommitMessage, re-implemented here
+  public generateCommitMessage = async (
+    messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
+  ): Promise<string | null | undefined> => {
+    const params = {
+      model: this.config.model,
+      messages,
+      temperature: 0,
+      top_p: 0.1,
+      max_tokens: this.config.maxTokensOutput
+    };
+
+    try {
+      const REQUEST_TOKENS = messages
+        .map((msg) => tokenCount(msg.content as string) + 4)
+        .reduce((a, b) => a + b, 0);
+
+      if (
+        REQUEST_TOKENS >
+        this.config.maxTokensInput - this.config.maxTokensOutput
+      )
+        throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
+
+      const completion = await this.client.chat.completions.create(params);
+
+      const message = completion.choices[0].message;
+
+      return message?.content;
+    } catch (error) {
+      const err = error as Error;
+      if (
+        axios.isAxiosError<{ error?: { message: string } }>(error) &&
+        error.response?.status === 401
+      ) {
+        const openAiError = error.response.data.error;
+
+        if (openAiError) throw new Error(openAiError.message);
+      }
+
+      throw err;
+    }
+  };
+}
diff --git a/src/utils/engine.ts b/src/utils/engine.ts
index 481a9f9..3137a05 100644
--- a/src/utils/engine.ts
+++ b/src/utils/engine.ts
@@ -10,6 +10,7 @@ import { MistralAiEngine } from '../engine/mistral';
 import { TestAi, TestMockType } from '../engine/testAi';
 import { GroqEngine } from '../engine/groq';
 import { MLXEngine } from '../engine/mlx';
+import { DeepseekEngine } from '../engine/deepseek';
 
 export function getEngine(): AiEngine {
   const config = getConfig();
@@ -51,6 +52,9 @@ export function getEngine(): AiEngine {
     case OCO_AI_PROVIDER_ENUM.MLX:
       return new MLXEngine(DEFAULT_CONFIG);
 
+    case OCO_AI_PROVIDER_ENUM.DEEPSEEK:
+      return new DeepseekEngine(DEFAULT_CONFIG);
+
     default:
       return new OpenAiEngine(DEFAULT_CONFIG);
   }
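
With this change, OpenCommit should be pointable at DeepSeek through the usual config flow, e.g. `oco config set OCO_AI_PROVIDER=deepseek OCO_API_KEY=your_deepseek_key OCO_MODEL=deepseek-chat` (using `OCO_MODEL` here assumes the model key works for DeepSeek the same way it does for the other providers; if it is left unset, `getDefaultModel` falls back to `deepseek-chat`, the first entry in `MODEL_LIST.deepseek`). The equivalent `.env` setup would look roughly like:

```env
OCO_AI_PROVIDER=deepseek
OCO_API_KEY=<your DeepSeek API key>
OCO_MODEL=deepseek-chat
```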
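
For anyone who wants to exercise the new engine outside the CLI, here is a minimal sketch. The config literal is an assumption built from the fields the engine actually reads (`model`, `maxTokensInput`, `maxTokensOutput`) plus the `apiKey` the underlying OpenAI client needs; the real `OpenAiConfig` shape lives in `src/engine/openAI.ts` and may differ, hence the cast.

```ts
import { DeepseekEngine } from './src/engine/deepseek';

// Illustrative config only: field names are inferred from what
// DeepseekEngine reads, not copied from the real OpenAiConfig type.
const engine = new DeepseekEngine({
  apiKey: process.env.OCO_API_KEY!,
  model: 'deepseek-chat',
  maxTokensInput: 4096,
  maxTokensOutput: 500
} as any);

(async () => {
  // A hypothetical message array in the shape the engine expects:
  // a system prompt followed by a staged diff as the user message.
  const commitMessage = await engine.generateCommitMessage([
    { role: 'system', content: 'You generate conventional commit messages.' },
    { role: 'user', content: 'diff --git a/foo.ts b/foo.ts\n+console.log("hi");' }
  ]);

  console.log(commitMessage);
})();
```

Because DeepSeek exposes an OpenAI-compatible API, the subclass only has to pin `baseURL` and can reuse the parent's client wiring, which is also why the re-implemented `generateCommitMessage` is identical to the one in `OpenAiEngine`.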