diff --git a/README.md b/README.md
index bb0fafe..d2cbe37 100644
--- a/README.md
+++ b/README.md
@@ -245,7 +245,13 @@ If you are behind a proxy, you can set it in the config:
 oco config set OCO_PROXY=http://127.0.0.1:7890
 ```
 
-Or it will automatically use `HTTPS_PROXY` or `HTTP_PROXY` environment variables.
+If `OCO_PROXY` is unset, OpenCommit will automatically use `HTTPS_PROXY` or `HTTP_PROXY` environment variables.
+
+To explicitly disable proxy use for OpenCommit, even when those environment variables are set:
+
+```sh
+oco config set OCO_PROXY=null
+```
 
 ### Locale configuration
diff --git a/src/cli.ts b/src/cli.ts
index 80b1e22..0522129 100755
--- a/src/cli.ts
+++ b/src/cli.ts
@@ -8,7 +8,7 @@ import { commitlintConfigCommand } from './commands/commitlint';
 import { configCommand, getConfig } from './commands/config';
 import { hookCommand, isHookCalled } from './commands/githook.js';
 import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
-import { setupProxy } from './utils/proxy';
+import { resolveProxy, setupProxy } from './utils/proxy';
 import {
   setupCommand,
   isFirstRun,
@@ -20,7 +20,7 @@ import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
 import { runMigrations } from './migrations/_run.js';
 
 const config = getConfig();
-setupProxy(config.OCO_PROXY);
+setupProxy(resolveProxy(config.OCO_PROXY));
 
 const OCO_FLAGS_WITH_VALUE = new Set(['-c', '--context']);
 const OCO_BOOLEAN_FLAGS = new Set(['-y', '--yes', '--fgm']);
diff --git a/src/commands/commit.ts b/src/commands/commit.ts
index 98047d9..b2060c6 100644
--- a/src/commands/commit.ts
+++ b/src/commands/commit.ts
@@ -249,7 +249,9 @@ ${chalk.grey('——————————————————')}`
     const errorConfig = getConfig();
     const provider = errorConfig.OCO_AI_PROVIDER || 'openai';
 
-    const formatted = formatUserFriendlyError(error, provider);
+    const formatted = formatUserFriendlyError(error, provider, {
+      baseURL: errorConfig.OCO_API_URL
+    });
 
     outro(printFormattedError(formatted));
     process.exit(1);
diff --git a/src/commands/config.ts b/src/commands/config.ts
index 95fc8d7..cff653f 100644
--- a/src/commands/config.ts
+++ b/src/commands/config.ts
@@ -723,7 +723,8 @@ export const configValidators = {
   [CONFIG_KEYS.OCO_API_URL](value: any) {
     validateConfig(
       CONFIG_KEYS.OCO_API_URL,
-      typeof value === 'string',
+      typeof value === 'string' &&
+        /^(https?:\/\/)/.test(value),
       `${value} is not a valid URL. It should start with 'http://' or 'https://'.`
     );
     return value;
@@ -732,7 +733,8 @@ export const configValidators = {
   [CONFIG_KEYS.OCO_PROXY](value: any) {
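+    // `null` is a valid value here: it explicitly disables proxying even when
+    // the HTTP_PROXY/HTTPS_PROXY environment variables are set.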
     validateConfig(
       CONFIG_KEYS.OCO_PROXY,
-      typeof value === 'string',
+      value === null ||
+        (typeof value === 'string' && /^(https?:\/\/)/.test(value)),
       `${value} is not a valid URL. It should start with 'http://' or 'https://'.`
     );
     return value;
@@ -900,7 +902,7 @@ export type ConfigType = {
   [CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
   [CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
   [CONFIG_KEYS.OCO_API_URL]?: string;
-  [CONFIG_KEYS.OCO_PROXY]?: string;
+  [CONFIG_KEYS.OCO_PROXY]?: string | null;
   [CONFIG_KEYS.OCO_API_CUSTOM_HEADERS]?: string;
   [CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
   [CONFIG_KEYS.OCO_EMOJI]: boolean;
@@ -986,10 +988,7 @@ const getEnvConfig = (envPath: string) => {
   return {
     OCO_MODEL: process.env.OCO_MODEL,
     OCO_API_URL: process.env.OCO_API_URL,
-    OCO_PROXY:
-      process.env.OCO_PROXY ||
-      process.env.HTTPS_PROXY ||
-      process.env.HTTP_PROXY,
+    OCO_PROXY: process.env.OCO_PROXY,
    OCO_API_KEY: process.env.OCO_API_KEY,
    OCO_API_CUSTOM_HEADERS: process.env.OCO_API_CUSTOM_HEADERS,
    OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
@@ -1027,16 +1026,13 @@ export const getIsGlobalConfigFileExist = (
 };
 
 export const getGlobalConfig = (configPath: string = defaultConfigPath) => {
-  let globalConfig: ConfigType;
-
   const isGlobalConfigFileExist = getIsGlobalConfigFileExist(configPath);
-  if (!isGlobalConfigFileExist) globalConfig = initGlobalConfig(configPath);
-  else {
-    const configFile = readFileSync(configPath, 'utf8');
-    globalConfig = iniParse(configFile) as ConfigType;
+  if (!isGlobalConfigFileExist) {
+    return { ...DEFAULT_CONFIG };
   }
 
-  return globalConfig;
+  const configFile = readFileSync(configPath, 'utf8');
+  return iniParse(configFile) as ConfigType;
 };
 
 /**
@@ -1049,7 +1045,10 @@ export const getGlobalConfig = (configPath: string = defaultConfigPath) => {
 const mergeConfigs = (main: Partial<ConfigType>, fallback: ConfigType) => {
   const allKeys = new Set([...Object.keys(main), ...Object.keys(fallback)]);
   return Array.from(allKeys).reduce((acc, key) => {
-    acc[key] = parseConfigVarValue(main[key] ?? fallback[key]);
+    const mainValue = main[key];
+    acc[key] = parseConfigVarValue(
+      mainValue !== undefined ? mainValue : fallback[key]
+    );
     return acc;
   }, {} as ConfigType);
 };
@@ -1218,7 +1217,10 @@ function getConfigKeyDetails(key) {
     case CONFIG_KEYS.OCO_PROXY:
       return {
         description: 'HTTP/HTTPS Proxy URL',
-        values: ["URL string (must start with 'http://' or 'https://')"]
+        values: [
+          "URL string (must start with 'http://' or 'https://')",
+          'null (disable proxy even when HTTP_PROXY/HTTPS_PROXY are set)'
+        ]
       };
     case CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
       return {
diff --git a/src/commands/setup.ts b/src/commands/setup.ts
index 7f044e5..28d4029 100644
--- a/src/commands/setup.ts
+++ b/src/commands/setup.ts
@@ -427,22 +427,23 @@ export async function runSetup(): Promise<void> {
 }
 
 export function isFirstRun(): boolean {
+  const hasGlobalConfig = getIsGlobalConfigFileExist();
   const config = getConfig();
 
-  // Check if API key is missing for providers that need it
   const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
 
-  if (MODEL_REQUIRED_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
-    // For Ollama/MLX, check if model is set
-    return !config.OCO_MODEL;
-  }
-
   if (provider === OCO_AI_PROVIDER_ENUM.TEST) {
     return false;
   }
 
-  // For other providers, check if API key is set
-  return !config.OCO_API_KEY;
+  const hasRequiredConfig = MODEL_REQUIRED_PROVIDERS.includes(
+    provider as OCO_AI_PROVIDER_ENUM
+  )
+    ? Boolean(config.OCO_MODEL)
+    : Boolean(config.OCO_API_KEY);
+
+  // Trigger the full setup wizard only when nothing usable was configured yet.
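+  // An existing global config file also counts as already set up, even if the
+  // current provider is still missing its API key or model.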
+  return !hasGlobalConfig && !hasRequiredConfig;
 }
 
 export async function promptForMissingApiKey(): Promise<void> {
diff --git a/src/engine/Engine.ts b/src/engine/Engine.ts
index 0ddaa8d..95fbc5d 100644
--- a/src/engine/Engine.ts
+++ b/src/engine/Engine.ts
@@ -11,7 +11,7 @@ export interface AiEngineConfig {
   maxTokensOutput: number;
   maxTokensInput: number;
   baseURL?: string;
-  proxy?: string;
+  proxy?: string | null;
   customHeaders?: Record<string, string>;
   ollamaThink?: boolean;
 }
diff --git a/src/engine/anthropic.ts b/src/engine/anthropic.ts
index 0ceee8d..3716a24 100644
--- a/src/engine/anthropic.ts
+++ b/src/engine/anthropic.ts
@@ -5,8 +5,8 @@
 import { MessageParam } from '@anthropic-ai/sdk/resources/messages.mjs';
 import { OpenAI } from 'openai';
 
-import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
 import { normalizeEngineError } from '../utils/engineErrorHandler';
+import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -21,8 +21,7 @@ export class AnthropicEngine implements AiEngine {
     this.config = config;
 
     const clientOptions: any = { apiKey: this.config.apiKey };
-    const proxy =
-      config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
+    const proxy = config.proxy;
     if (proxy) {
       clientOptions.httpAgent = new HttpsProxyAgent(proxy);
     }
diff --git a/src/engine/azure.ts b/src/engine/azure.ts
index d7eec21..b9bf281 100644
--- a/src/engine/azure.ts
+++ b/src/engine/azure.ts
@@ -3,8 +3,8 @@
 import { OpenAIClient as AzureOpenAIClient } from '@azure/openai';
 import { OpenAI } from 'openai';
 
-import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
 import { normalizeEngineError } from '../utils/engineErrorHandler';
+import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
diff --git a/src/engine/deepseek.ts b/src/engine/deepseek.ts
index c1794d3..cec122b 100644
--- a/src/engine/deepseek.ts
+++ b/src/engine/deepseek.ts
@@ -1,6 +1,6 @@
 import { OpenAI } from 'openai';
-import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
 import { normalizeEngineError } from '../utils/engineErrorHandler';
+import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { OpenAiEngine, OpenAiConfig } from './openAi';
diff --git a/src/engine/mistral.ts b/src/engine/mistral.ts
index ebdfa54..cfe6197 100644
--- a/src/engine/mistral.ts
+++ b/src/engine/mistral.ts
@@ -1,7 +1,8 @@
 import { OpenAI } from 'openai';
 import { HttpsProxyAgent } from 'https-proxy-agent';
-import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
+import { createRequire } from 'module';
 import { normalizeEngineError } from '../utils/engineErrorHandler';
+import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -10,8 +11,14 @@ import { AiEngine, AiEngineConfig } from './Engine';
 
 export interface MistralAiConfig extends AiEngineConfig {}
 
 export type MistralCompletionMessageParam = Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>;
 
-// Import Mistral dynamically to avoid TS errors
-// eslint-disable-next-line @typescript-eslint/no-var-requires
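+// Create a require() that works both in the CommonJS build (__filename) and in
+// the ESM test environment (import.meta.url).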
+let require: NodeRequire;
+
+try {
+  require = createRequire(__filename);
+} catch {
+  require = createRequire(import.meta.url);
+}
+
 const Mistral = require('@mistralai/mistralai').Mistral;
 
 export class MistralAiEngine implements AiEngine {
diff --git a/src/engine/openAi.ts b/src/engine/openAi.ts
index 3c407c3..f057e4c 100644
--- a/src/engine/openAi.ts
+++ b/src/engine/openAi.ts
@@ -1,8 +1,8 @@
 import { OpenAI } from 'openai';
 import { HttpsProxyAgent } from 'https-proxy-agent';
-import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
-import { parseCustomHeaders } from '../utils/engine';
+import { parseCustomHeaders } from '../utils/customHeaders';
 import { normalizeEngineError } from '../utils/engineErrorHandler';
+import { GenerateCommitMessageErrorEnum } from '../utils/generateCommitMessageErrors';
 import { removeContentTags } from '../utils/removeContentTags';
 import { tokenCount } from '../utils/tokenCount';
 import { AiEngine, AiEngineConfig } from './Engine';
@@ -24,8 +24,7 @@ export class OpenAiEngine implements AiEngine {
       clientOptions.baseURL = config.baseURL;
     }
 
-    const proxy =
-      config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
+    const proxy = config.proxy;
     if (proxy) {
       clientOptions.httpAgent = new HttpsProxyAgent(proxy);
     }
diff --git a/src/generateCommitMessageFromGitDiff.ts b/src/generateCommitMessageFromGitDiff.ts
index 9e5f071..2f1f7c6 100644
--- a/src/generateCommitMessageFromGitDiff.ts
+++ b/src/generateCommitMessageFromGitDiff.ts
@@ -16,6 +16,7 @@ import {
   getSuggestedModels,
   ModelNotFoundError
 } from './utils/errors';
+import { GenerateCommitMessageErrorEnum } from './utils/generateCommitMessageErrors';
 import { mergeDiffs } from './utils/mergeDiffs';
 import { tokenCount } from './utils/tokenCount';
 
@@ -43,13 +44,6 @@ const generateCommitMessageChatCompletionPrompt = async (
   return chatContextAsCompletionRequest;
 };
 
-export enum GenerateCommitMessageErrorEnum {
-  tooMuchTokens = 'TOO_MUCH_TOKENS',
-  internalError = 'INTERNAL_ERROR',
-  emptyMessage = 'EMPTY_MESSAGE',
-  outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
-}
-
 async function handleModelNotFoundError(
   error: Error,
   provider: string,
diff --git a/src/utils/customHeaders.ts b/src/utils/customHeaders.ts
new file mode 100644
index 0000000..6ec86e3
--- /dev/null
+++ b/src/utils/customHeaders.ts
@@ -0,0 +1,21 @@
+export function parseCustomHeaders(headers: any): Record<string, string> {
+  let parsedHeaders = {};
+
+  if (!headers) {
+    return parsedHeaders;
+  }
+
+  try {
+    if (typeof headers === 'object' && !Array.isArray(headers)) {
+      parsedHeaders = headers;
+    } else {
+      parsedHeaders = JSON.parse(headers);
+    }
+  } catch {
+    console.warn(
+      'Invalid OCO_API_CUSTOM_HEADERS format, ignoring custom headers'
+    );
+  }
+
+  return parsedHeaders;
+}
diff --git a/src/utils/engine.ts b/src/utils/engine.ts
index 436f7a1..0a07fe4 100644
--- a/src/utils/engine.ts
+++ b/src/utils/engine.ts
@@ -13,41 +13,22 @@ import { MLXEngine } from '../engine/mlx';
 import { DeepseekEngine } from '../engine/deepseek';
 import { AimlApiEngine } from '../engine/aimlapi';
 import { OpenRouterEngine } from '../engine/openrouter';
-
-export function parseCustomHeaders(headers: any): Record<string, string> {
-  let parsedHeaders = {};
-
-  if (!headers) {
-    return parsedHeaders;
-  }
-
-  try {
-    if (typeof headers === 'object' && !Array.isArray(headers)) {
-      parsedHeaders = headers;
-    } else {
-      parsedHeaders = JSON.parse(headers);
-    }
-  } catch (error) {
-    console.warn(
-      'Invalid OCO_API_CUSTOM_HEADERS format, ignoring custom headers'
-    );
-  }
-
-  return parsedHeaders;
-}
+import { parseCustomHeaders } from './customHeaders';
+import { resolveProxy } from './proxy';
 
 export function getEngine(): AiEngine {
   const config = getConfig();
   const provider = config.OCO_AI_PROVIDER;
   const customHeaders = parseCustomHeaders(config.OCO_API_CUSTOM_HEADERS);
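+  // Resolve the proxy once here so every engine receives a final value and
+  // never re-reads the HTTPS_PROXY/HTTP_PROXY environment variables itself.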
+  const resolvedProxy = resolveProxy(config.OCO_PROXY);
 
   const DEFAULT_CONFIG = {
     model: config.OCO_MODEL!,
     maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
     maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
     baseURL: config.OCO_API_URL!,
-    proxy: config.OCO_PROXY!,
+    proxy: resolvedProxy,
     apiKey: config.OCO_API_KEY!,
     customHeaders
   };
diff --git a/src/utils/errors.ts b/src/utils/errors.ts
index 1fd9887..ae988e3 100644
--- a/src/utils/errors.ts
+++ b/src/utils/errors.ts
@@ -349,10 +349,44 @@ export interface FormattedError {
   suggestion: string | null;
 }
 
+export interface ErrorFormattingContext {
+  baseURL?: string;
+}
+
+function getCustomEndpointLabel(baseURL?: string): string | null {
+  if (!baseURL) {
+    return null;
+  }
+
+  try {
+    return new URL(baseURL).host;
+  } catch {
+    return null;
+  }
+}
+
+function getServiceUnavailableMessage(
+  provider: string,
+  context?: ErrorFormattingContext
+): string {
+  const endpointLabel = getCustomEndpointLabel(context?.baseURL);
+
+  if (endpointLabel) {
+    return `The configured API endpoint (${endpointLabel}) is temporarily unavailable.`;
+  }
+
+  if (context?.baseURL) {
+    return 'The configured API endpoint is temporarily unavailable.';
+  }
+
+  return `The ${provider} service is temporarily unavailable.`;
+}
+
 // Format an error into a user-friendly structure
 export function formatUserFriendlyError(
   error: unknown,
-  provider: string
+  provider: string,
+  context?: ErrorFormattingContext
 ): FormattedError {
   const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
 
@@ -381,7 +415,7 @@ export function formatUserFriendlyError(
   if (error instanceof ServiceUnavailableError) {
     return {
       title: 'Service Unavailable',
-      message: `The ${provider} service is temporarily unavailable.`,
+      message: getServiceUnavailableMessage(provider, context),
       helpUrl: null,
       suggestion: 'Please try again in a few moments.'
     };
@@ -427,7 +461,7 @@ export function formatUserFriendlyError(
   if (isServiceUnavailableError(error)) {
     return {
       title: 'Service Unavailable',
-      message: `The ${provider} service is temporarily unavailable.`,
+      message: getServiceUnavailableMessage(provider, context),
       helpUrl: null,
       suggestion: 'Please try again in a few moments.'
     };
diff --git a/src/utils/generateCommitMessageErrors.ts b/src/utils/generateCommitMessageErrors.ts
new file mode 100644
index 0000000..2400b0e
--- /dev/null
+++ b/src/utils/generateCommitMessageErrors.ts
@@ -0,0 +1,8 @@
+import { DEFAULT_TOKEN_LIMITS } from '../commands/config';
+
+export enum GenerateCommitMessageErrorEnum {
+  tooMuchTokens = 'TOO_MUCH_TOKENS',
+  internalError = 'INTERNAL_ERROR',
+  emptyMessage = 'EMPTY_MESSAGE',
+  outputTokensTooHigh = `Token limit exceeded, OCO_TOKENS_MAX_OUTPUT must not be much higher than the default ${DEFAULT_TOKEN_LIMITS.DEFAULT_MAX_TOKENS_OUTPUT} tokens.`
+}
diff --git a/src/utils/git.ts b/src/utils/git.ts
index 902d61a..c834151 100644
--- a/src/utils/git.ts
+++ b/src/utils/git.ts
@@ -93,36 +93,34 @@ export const gitAdd = async ({ files }: { files: string[] }) => {
   gitAddSpinner.stop(`Staged ${files.length} files`);
 };
 
+const isFileExcludedFromDiff = (file: string) =>
+  file.includes('.lock') ||
+  file.includes('-lock.') ||
+  file.includes('.svg') ||
+  file.includes('.png') ||
+  file.includes('.jpg') ||
+  file.includes('.jpeg') ||
+  file.includes('.webp') ||
+  file.includes('.gif');
+
 export const getDiff = async ({ files }: { files: string[] }) => {
   const gitDir = await getGitDir();
 
-  const lockFiles = files.filter(
-    (file) =>
-      file.includes('.lock') ||
-      file.includes('-lock.') ||
-      file.includes('.svg') ||
-      file.includes('.png') ||
-      file.includes('.jpg') ||
-      file.includes('.jpeg') ||
-      file.includes('.webp') ||
-      file.includes('.gif')
-  );
+  const excludedFiles = files.filter(isFileExcludedFromDiff);
 
-  if (lockFiles.length) {
+  if (excludedFiles.length) {
     outro(
-      `Some files are excluded by default from 'git diff'. No commit messages are generated for this files:\n${lockFiles.join(
+      `Some files are excluded by default from 'git diff'. No commit messages are generated for these files:\n${excludedFiles.join(
         '\n'
       )}`
     );
   }
 
-  const filesWithoutLocks = files.filter(
-    (file) => !file.includes('.lock') && !file.includes('-lock.')
-  );
+  const diffableFiles = files.filter((file) => !isFileExcludedFromDiff(file));
 
   const { stdout: diff } = await execa(
     'git',
-    ['diff', '--staged', '--', ...filesWithoutLocks],
+    ['diff', '--staged', '--', ...diffableFiles],
     { cwd: gitDir }
   );
diff --git a/src/utils/proxy.ts b/src/utils/proxy.ts
index dea840a..f2ae46e 100644
--- a/src/utils/proxy.ts
+++ b/src/utils/proxy.ts
@@ -1,21 +1,56 @@
-import { setGlobalDispatcher, ProxyAgent } from 'undici';
 import axios from 'axios';
 import { HttpsProxyAgent } from 'https-proxy-agent';
+import {
+  Agent,
+  ProxyAgent,
+  setGlobalDispatcher
+} from 'undici';
 
-export function setupProxy(proxyUrl?: string) {
-  const proxy = proxyUrl || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
-  if (proxy) {
-    try {
-      // Set global dispatcher for undici (affects globalThis.fetch used by Gemini and others)
-      const dispatcher = new ProxyAgent(proxy);
-      setGlobalDispatcher(dispatcher);
+export type ProxySetting = string | null | undefined;
 
-      // Set axios global agent
-      const agent = new HttpsProxyAgent(proxy);
-      axios.defaults.httpsAgent = agent;
-      axios.defaults.proxy = false; // Disable axios built-in proxy handling to use agent
-    } catch (error) {
-      console.warn(`[Proxy Error] Failed to set proxy: ${error.message}`);
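+// Resolution order: an explicit proxy URL wins, `null` disables proxying entirely,
+// and an unset value falls back to the HTTPS_PROXY/HTTP_PROXY environment variables.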
+export function resolveProxy(proxySetting?: ProxySetting): ProxySetting {
+  if (proxySetting === null) {
+    return null;
+  }
+
+  if (typeof proxySetting === 'string' && proxySetting.trim().length > 0) {
+    return proxySetting;
+  }
+
+  return process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
+}
+
+function resetProxySetup(disableEnvProxy: boolean) {
+  setGlobalDispatcher(new Agent());
+  axios.defaults.httpAgent = undefined;
+  axios.defaults.httpsAgent = undefined;
+  axios.defaults.proxy = disableEnvProxy ? false : undefined;
+}
+
+export function setupProxy(proxySetting?: ProxySetting) {
+  try {
+    if (proxySetting === null) {
+      resetProxySetup(true);
+      return;
     }
+
+    resetProxySetup(false);
+
+    if (!proxySetting) {
+      return;
+    }
+
+    // Set global dispatcher for undici (affects globalThis.fetch used by Gemini and others)
+    const dispatcher = new ProxyAgent(proxySetting);
+    setGlobalDispatcher(dispatcher);
+
+    // Set axios global agents and disable axios built-in proxy handling.
+    const agent = new HttpsProxyAgent(proxySetting);
+    axios.defaults.httpAgent = agent;
+    axios.defaults.httpsAgent = agent;
+    axios.defaults.proxy = false;
+  } catch (error) {
+    const message = error instanceof Error ? error.message : String(error);
+    console.warn(`[Proxy Error] Failed to set proxy: ${message}`);
   }
 }
diff --git a/test/unit/config.test.ts b/test/unit/config.test.ts
index fc4709d..5b3e44f 100644
--- a/test/unit/config.test.ts
+++ b/test/unit/config.test.ts
@@ -199,6 +199,48 @@ describe('config', () => {
       expect(config).not.toEqual(null);
       expect(config.OCO_API_KEY).toEqual(undefined);
     });
+
+    it('should not create a global config file when only reading defaults', async () => {
+      globalConfigFile = await generateConfig('.opencommit', {});
+      rmSync(globalConfigFile.filePath);
+
+      const config = getConfig({
+        globalPath: globalConfigFile.filePath
+      });
+
+      expect(config.OCO_MODEL).toEqual(DEFAULT_CONFIG.OCO_MODEL);
+      expect(existsSync(globalConfigFile.filePath)).toBe(false);
+    });
+
+    it('should not materialize ambient proxy env vars into OCO_PROXY', async () => {
+      process.env.HTTPS_PROXY = 'http://127.0.0.1:7890';
+
+      globalConfigFile = await generateConfig('.opencommit', {});
+      envConfigFile = await generateConfig('.env', {});
+
+      const config = getConfig({
+        globalPath: globalConfigFile.filePath,
+        envPath: envConfigFile.filePath
+      });
+
+      expect(config.OCO_PROXY).toEqual(undefined);
+    });
+
+    it('should parse OCO_PROXY=null from local .env as explicit disable', async () => {
+      globalConfigFile = await generateConfig('.opencommit', {
+        OCO_PROXY: 'http://global-proxy:8080'
+      });
+      envConfigFile = await generateConfig('.env', {
+        OCO_PROXY: 'null'
+      });
+
+      const config = getConfig({
+        globalPath: globalConfigFile.filePath,
+        envPath: envConfigFile.filePath
+      });
+
+      expect(config.OCO_PROXY).toEqual(null);
+    });
   });
 
   describe('setConfig', () => {
@@ -325,5 +367,20 @@ describe('config', () => {
       const fileContent2 = readFileSync(globalConfigFile.filePath, 'utf8');
       expect(fileContent2).toContain('OCO_MODEL=gpt-4');
     });
+
+    it('should persist OCO_PROXY=null as an explicit disable', async () => {
+      await setConfig(
+        [[CONFIG_KEYS.OCO_PROXY, null]],
+        globalConfigFile.filePath
+      );
+
+      const config = getConfig({
+        globalPath: globalConfigFile.filePath
+      });
+      const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');
+
+      expect(config.OCO_PROXY).toEqual(null);
+      expect(fileContent).toContain('OCO_PROXY=null');
+    });
   });
 });
diff --git a/test/unit/errors.test.ts b/test/unit/errors.test.ts
new file mode 100644
index 0000000..331a71b
--- /dev/null
+++ b/test/unit/errors.test.ts
@@ -0,0 +1,29 @@
+import {
+  formatUserFriendlyError,
+  ServiceUnavailableError
+} from '../../src/utils/errors';
+
+describe('formatUserFriendlyError', () => {
+  it('should keep provider wording when no custom API URL is configured', () => {
+    const formatted = formatUserFriendlyError(
+      new ServiceUnavailableError('openai'),
+      'openai'
+    );
+
+    expect(formatted.message).toEqual(
+      'The openai service is temporarily unavailable.'
+    );
+  });
+
+  it('should use configured endpoint wording when a custom API URL is provided', () => {
+    const formatted = formatUserFriendlyError(
+      new ServiceUnavailableError('openai'),
+      'openai',
+      { baseURL: 'http://127.0.0.1:1234/v1' }
+    );
+
+    expect(formatted.message).toContain('configured API endpoint');
+    expect(formatted.message).toContain('127.0.0.1:1234');
+    expect(formatted.message).not.toContain('openai service');
+  });
+});
diff --git a/test/unit/gemini.test.ts b/test/unit/gemini.test.ts
index d7024f0..ed1d6a9 100644
--- a/test/unit/gemini.test.ts
+++ b/test/unit/gemini.test.ts
@@ -1,96 +1,65 @@
+import { OpenAI } from 'openai';
 import { GeminiEngine } from '../../src/engine/gemini';
-import { GenerativeModel, GoogleGenerativeAI } from '@google/generative-ai';
-import {
-  ConfigType,
-  getConfig,
-  OCO_AI_PROVIDER_ENUM
-} from '../../src/commands/config';
-import { OpenAI } from 'openai';
-
-describe('Gemini', () => {
-  let gemini: GeminiEngine;
-  let mockConfig: ConfigType;
-  let mockGoogleGenerativeAi: GoogleGenerativeAI;
-  let mockGenerativeModel: GenerativeModel;
-  let mockExit: jest.SpyInstance;
-
-  const noop: (...args: any[]) => any = (...args: any[]) => {};
-
-  const mockGemini = () => {
-    mockConfig = getConfig() as ConfigType;
-
-    gemini = new GeminiEngine({
-      apiKey: mockConfig.OCO_API_KEY,
-      model: mockConfig.OCO_MODEL
+describe('GeminiEngine', () => {
+  it('maps OpenAI-style chat messages into Gemini request payloads', async () => {
+    const engine = new GeminiEngine({
+      apiKey: 'mock-api-key',
+      model: 'gemini-1.5-flash',
+      baseURL: 'http://127.0.0.1:8080/v1',
+      maxTokensOutput: 256,
+      maxTokensInput: 4096
     });
-  };
 
-  const oldEnv = process.env;
-
-  beforeEach(() => {
-    jest.resetModules();
-    process.env = { ...oldEnv };
-
-    jest.mock('@google/generative-ai');
-    jest.mock('../src/commands/config');
-
-    jest.mock('@clack/prompts', () => ({
-      intro: jest.fn(),
-      outro: jest.fn()
-    }));
-
-    mockExit = jest.spyOn(process, 'exit').mockImplementation();
-
-    mockConfig = getConfig() as ConfigType;
-
-    mockConfig.OCO_AI_PROVIDER = OCO_AI_PROVIDER_ENUM.GEMINI;
-    mockConfig.OCO_API_KEY = 'mock-api-key';
-    mockConfig.OCO_MODEL = 'gemini-1.5-flash';
-
-    mockGoogleGenerativeAi = new GoogleGenerativeAI(mockConfig.OCO_API_KEY);
-    mockGenerativeModel = mockGoogleGenerativeAi.getGenerativeModel({
-      model: mockConfig.OCO_MODEL
+    const generateContent = jest.fn().mockResolvedValue({
+      response: {
+        text: () => 'feat(gemini): translate the diff<think>hidden</think>'
+      }
+    });
+    const getGenerativeModel = jest.fn().mockReturnValue({
+      generateContent
     });
-  });
 
-  afterEach(() => {
-    gemini = undefined as any;
-  });
+    engine.client = {
+      getGenerativeModel
+    } as any;
 
-  afterAll(() => {
-    mockExit.mockRestore();
-    process.env = oldEnv;
-  });
+    const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> = [
+      { role: 'system', content: 'system message' },
+      { role: 'assistant', content: 'assistant guidance' },
+      { role: 'user', content: 'diff --git a/file b/file' }
+    ];
 
-  it.skip('should exit process if OCO_GEMINI_API_KEY is not set and command is not config', () => {
-    process.env.OCO_GEMINI_API_KEY = undefined;
-    process.env.OCO_AI_PROVIDER = 'gemini';
+    const result = await engine.generateCommitMessage(messages);
 
-    mockGemini();
-
-    expect(mockExit).toHaveBeenCalledWith(1);
-  });
-
-  it('should generate commit message', async () => {
-    const mockGenerateContent = jest
-      .fn()
-      .mockResolvedValue({ response: { text: () => 'generated content' } });
-    mockGenerativeModel.generateContent = mockGenerateContent;
-
-    mockGemini();
-
-    const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> =
-      [
-        { role: 'system', content: 'system message' },
-        { role: 'assistant', content: 'assistant message' }
-      ];
-
-    jest
-      .spyOn(gemini, 'generateCommitMessage')
-      .mockImplementation(async () => 'generated content');
-    const result = await gemini.generateCommitMessage(messages);
-
-    expect(result).toEqual('generated content');
+    expect(result).toEqual('feat(gemini): translate the diff');
+    expect(getGenerativeModel).toHaveBeenCalledWith(
+      {
+        model: 'gemini-1.5-flash',
+        systemInstruction: 'system message'
+      },
+      {
+        baseUrl: 'http://127.0.0.1:8080/v1'
+      }
+    );
+    expect(generateContent).toHaveBeenCalledWith(
+      expect.objectContaining({
+        contents: [
+          {
+            parts: [{ text: 'assistant guidance' }],
+            role: 'model'
+          },
+          {
+            parts: [{ text: 'diff --git a/file b/file' }],
+            role: 'user'
+          }
+        ],
+        generationConfig: expect.objectContaining({
+          maxOutputTokens: 256,
+          temperature: 0,
+          topP: 0.1
+        })
+      })
+    );
   });
 });
diff --git a/test/unit/openAi.test.ts b/test/unit/openAi.test.ts
index c1defd5..2ebeeb3 100644
--- a/test/unit/openAi.test.ts
+++ b/test/unit/openAi.test.ts
@@ -1,26 +1,71 @@
-// Test the reasoning model detection regex used in OpenAiEngine.
-// Integration test with the engine is not possible because mistral.ts
-// uses require() which is unavailable in the ESM test environment.
-const REASONING_MODEL_RE = /^(o[1-9]|gpt-5)/;
+import { OpenAI } from 'openai';
+import { OpenAiEngine } from '../../src/engine/openAi';
 
-describe('OpenAiEngine reasoning model detection', () => {
-  it.each([
-    ['o1', true],
-    ['o1-preview', true],
-    ['o1-mini', true],
-    ['o3', true],
-    ['o3-mini', true],
-    ['o4-mini', true],
-    ['gpt-5', true],
-    ['gpt-5-nano', true],
-    ['gpt-4o', false],
-    ['gpt-4o-mini', false],
-    ['gpt-4', false],
-    ['gpt-3.5-turbo', false]
-  ])(
-    'model "%s" isReasoning=%s',
-    (model, expected) => {
-      expect(REASONING_MODEL_RE.test(model)).toBe(expected);
-    }
-  );
+describe('OpenAiEngine', () => {
+  const baseConfig = {
+    apiKey: 'test-openai-key',
+    maxTokensInput: 4096,
+    maxTokensOutput: 256
+  };
+
+  const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> = [
+    { role: 'system', content: 'system message' },
+    { role: 'user', content: 'diff --git a/file b/file' }
+  ];
+
+  it('uses max_completion_tokens for reasoning models', async () => {
+    const engine = new OpenAiEngine({
+      ...baseConfig,
+      model: 'o3-mini'
+    });
+
+    const create = jest
+      .spyOn(engine.client.chat.completions, 'create')
+      .mockResolvedValue({
+        choices: [{ message: { content: 'feat(openai): reasoning path' } }]
+      } as any);
+
+    await engine.generateCommitMessage(messages);
+
+    expect(create).toHaveBeenCalledWith(
+      expect.objectContaining({
+        model: 'o3-mini',
+        max_completion_tokens: 256
+      })
+    );
+    expect(create).toHaveBeenCalledWith(
+      expect.not.objectContaining({
+        max_tokens: expect.anything()
+      })
+    );
+  });
+
+  it('uses max_tokens and sampling params for non-reasoning models', async () => {
+    const engine = new OpenAiEngine({
+      ...baseConfig,
+      model: 'gpt-4o-mini'
+    });
+
+    const create = jest
+      .spyOn(engine.client.chat.completions, 'create')
+      .mockResolvedValue({
+        choices: [{ message: { content: 'feat(openai): standard path' } }]
+      } as any);
+
+    await engine.generateCommitMessage(messages);
+
+    expect(create).toHaveBeenCalledWith(
+      expect.objectContaining({
+        model: 'gpt-4o-mini',
+        max_tokens: 256,
+        temperature: 0,
+        top_p: 0.1
+      })
+    );
+    expect(create).toHaveBeenCalledWith(
+      expect.not.objectContaining({
+        max_completion_tokens: expect.anything()
+      })
+    );
+  });
 });
diff --git a/test/unit/proxy.test.ts b/test/unit/proxy.test.ts
new file mode 100644
index 0000000..5b8a126
--- /dev/null
+++ b/test/unit/proxy.test.ts
@@ -0,0 +1,126 @@
+import axios from 'axios';
+import { getGlobalDispatcher } from 'undici';
+import { AnthropicEngine } from '../../src/engine/anthropic';
+import { OpenAiEngine } from '../../src/engine/openAi';
+import { resolveProxy, setupProxy } from '../../src/utils/proxy';
+
+describe('proxy utilities', () => {
+  const originalEnv = { ...process.env };
+  const originalAxiosProxy = axios.defaults.proxy;
+  const originalAxiosHttpAgent = axios.defaults.httpAgent;
+  const originalAxiosHttpsAgent = axios.defaults.httpsAgent;
+
+  function resetEnv(env: NodeJS.ProcessEnv) {
+    Object.keys(process.env).forEach((key) => {
+      if (!(key in env)) {
+        delete process.env[key];
+      } else {
+        process.env[key] = env[key];
+      }
+    });
+  }
+
+  beforeEach(() => {
+    resetEnv(originalEnv);
+    setupProxy(undefined);
+  });
+
+  afterEach(() => {
+    resetEnv(originalEnv);
+    setupProxy(undefined);
+    axios.defaults.proxy = originalAxiosProxy;
+    axios.defaults.httpAgent = originalAxiosHttpAgent;
+    axios.defaults.httpsAgent = originalAxiosHttpsAgent;
+  });
+
+  it('should prefer an explicit proxy URL over ambient proxy env vars', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    expect(resolveProxy('http://explicit-proxy:8080')).toEqual(
+      'http://explicit-proxy:8080'
+    );
+  });
+
+  it('should return null when proxy is explicitly disabled', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    expect(resolveProxy(null)).toEqual(null);
+  });
+
+  it('should fall back to ambient proxy env vars when proxy is unset', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    expect(resolveProxy(undefined)).toEqual('http://ambient-proxy:8080');
+  });
+
+  it('should disable proxy usage when setupProxy receives null', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    setupProxy(null);
+
+    expect(getGlobalDispatcher().constructor.name).toEqual('Agent');
+    expect(axios.defaults.proxy).toEqual(false);
+    expect(axios.defaults.httpAgent).toBeUndefined();
+    expect(axios.defaults.httpsAgent).toBeUndefined();
+  });
+
+  it('should install proxy agents when setupProxy receives a proxy URL', () => {
+    setupProxy('http://127.0.0.1:7890');
+
+    expect(getGlobalDispatcher().constructor.name).toEqual('ProxyAgent');
+    expect(axios.defaults.proxy).toEqual(false);
+    expect(axios.defaults.httpAgent).toBeDefined();
+    expect(axios.defaults.httpsAgent).toBeDefined();
+  });
+});
+
+describe('engine proxy handling', () => {
+  const originalEnv = { ...process.env };
+  const baseConfig = {
+    apiKey: 'test-key',
+    model: 'gpt-4o-mini',
+    maxTokensInput: 4096,
+    maxTokensOutput: 256
+  };
+
+  function resetEnv(env: NodeJS.ProcessEnv) {
+    Object.keys(process.env).forEach((key) => {
+      if (!(key in env)) {
+        delete process.env[key];
+      } else {
+        process.env[key] = env[key];
+      }
+    });
+  }
+
+  beforeEach(() => {
+    resetEnv(originalEnv);
+  });
+
+  afterEach(() => {
+    resetEnv(originalEnv);
+  });
+
+  it('should not let OpenAI engine re-read proxy env vars when proxy is unset', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    const engine = new OpenAiEngine({
+      ...baseConfig,
+      proxy: undefined
+    });
+
+    expect(engine.client.httpAgent).toBeUndefined();
+  });
+
+  it('should not let Anthropic engine re-read proxy env vars when proxy is unset', () => {
+    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
+
+    const engine = new AnthropicEngine({
+      ...baseConfig,
+      model: 'claude-sonnet-4-20250514',
+      proxy: undefined
+    });
+
+    expect(engine.client.httpAgent).toBeUndefined();
+  });
+});