fix(cli): tighten proxy and setup behavior

Support explicit proxy disabling and ambient proxy fallback without leaking env state into config.

Also improve first-run detection, endpoint-specific error messaging, and diff exclusions, and tighten runtime helper boundaries; the new behavior is covered by unit tests.
di-sukharev
2026-04-10 15:16:11 +03:00
parent 7fa2384761
commit cf27085ac9
23 changed files with 527 additions and 214 deletions
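
A note on the proxy semantics above: resolution is tri-state. An explicit URL always wins, an explicit null disables proxying even when ambient env vars are set, and an unset value falls back to the environment without writing anything into the config. A minimal sketch of that logic, assuming the resolveProxy signature exercised by test/unit/proxy.test.ts below; the actual src/utils/proxy.ts may differ:

// Sketch only: signature and precedence inferred from the tests below;
// the lowercase/HTTP fallbacks are an assumption, as the tests only set HTTPS_PROXY.
type ProxySetting = string | null | undefined;

function resolveProxy(explicit: ProxySetting): string | null {
  // An explicit proxy URL takes precedence over ambient env vars.
  if (typeof explicit === 'string' && explicit.length > 0) return explicit;
  // Explicit null means "proxy disabled", even if HTTPS_PROXY is set.
  if (explicit === null) return null;
  // Unset: fall back to ambient proxy env vars without persisting them.
  return (
    process.env.HTTPS_PROXY ??
    process.env.https_proxy ??
    process.env.HTTP_PROXY ??
    process.env.http_proxy ??
    null
  );
}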


@@ -199,6 +199,48 @@ describe('config', () => {
      expect(config).not.toEqual(null);
      expect(config.OCO_API_KEY).toEqual(undefined);
    });

    it('should not create a global config file when only reading defaults', async () => {
      globalConfigFile = await generateConfig('.opencommit', {});
      rmSync(globalConfigFile.filePath);

      const config = getConfig({
        globalPath: globalConfigFile.filePath
      });

      expect(config.OCO_MODEL).toEqual(DEFAULT_CONFIG.OCO_MODEL);
      expect(existsSync(globalConfigFile.filePath)).toBe(false);
    });

    it('should not materialize ambient proxy env vars into OCO_PROXY', async () => {
      process.env.HTTPS_PROXY = 'http://127.0.0.1:7890';
      globalConfigFile = await generateConfig('.opencommit', {});
      envConfigFile = await generateConfig('.env', {});

      const config = getConfig({
        globalPath: globalConfigFile.filePath,
        envPath: envConfigFile.filePath
      });

      expect(config.OCO_PROXY).toEqual(undefined);
    });

    it('should parse OCO_PROXY=null from local .env as explicit disable', async () => {
      globalConfigFile = await generateConfig('.opencommit', {
        OCO_PROXY: 'http://global-proxy:8080'
      });
      envConfigFile = await generateConfig('.env', {
        OCO_PROXY: 'null'
      });

      const config = getConfig({
        globalPath: globalConfigFile.filePath,
        envPath: envConfigFile.filePath
      });

      expect(config.OCO_PROXY).toEqual(null);
    });
  });

  describe('setConfig', () => {
@@ -325,5 +367,20 @@ describe('config', () => {
      const fileContent2 = readFileSync(globalConfigFile.filePath, 'utf8');
      expect(fileContent2).toContain('OCO_MODEL=gpt-4');
    });

    it('should persist OCO_PROXY=null as an explicit disable', async () => {
      await setConfig(
        [[CONFIG_KEYS.OCO_PROXY, null]],
        globalConfigFile.filePath
      );

      const config = getConfig({
        globalPath: globalConfigFile.filePath
      });
      const fileContent = readFileSync(globalConfigFile.filePath, 'utf8');

      expect(config.OCO_PROXY).toEqual(null);
      expect(fileContent).toContain('OCO_PROXY=null');
    });
  });
});

test/unit/errors.test.ts Normal file

@@ -0,0 +1,29 @@
import {
  formatUserFriendlyError,
  ServiceUnavailableError
} from '../../src/utils/errors';

describe('formatUserFriendlyError', () => {
  it('should keep provider wording when no custom API URL is configured', () => {
    const formatted = formatUserFriendlyError(
      new ServiceUnavailableError('openai'),
      'openai'
    );

    expect(formatted.message).toEqual(
      'The openai service is temporarily unavailable.'
    );
  });

  it('should use configured endpoint wording when a custom API URL is provided', () => {
    const formatted = formatUserFriendlyError(
      new ServiceUnavailableError('openai'),
      'openai',
      { baseURL: 'http://127.0.0.1:1234/v1' }
    );

    expect(formatted.message).toContain('configured API endpoint');
    expect(formatted.message).toContain('127.0.0.1:1234');
    expect(formatted.message).not.toContain('openai service');
  });
});
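
A minimal sketch of the endpoint-aware wording these tests pin down; the class and function shapes here are hypothetical stand-ins for src/utils/errors.ts and may differ from the real implementation:

// Sketch only: shapes inferred from the assertions above.
class ServiceUnavailableError extends Error {
  constructor(public readonly provider: string) {
    super(`${provider} service unavailable`);
  }
}

function formatUserFriendlyError(
  error: Error,
  provider: string,
  options?: { baseURL?: string }
): { message: string } {
  if (error instanceof ServiceUnavailableError && options?.baseURL) {
    // A custom API URL is configured: blame the endpoint, not the provider.
    return {
      message: `The configured API endpoint (${options.baseURL}) is temporarily unavailable.`
    };
  }
  if (error instanceof ServiceUnavailableError) {
    return { message: `The ${provider} service is temporarily unavailable.` };
  }
  return { message: error.message };
}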


@@ -1,96 +1,65 @@
import { OpenAI } from 'openai';
import { GeminiEngine } from '../../src/engine/gemini';

describe('GeminiEngine', () => {
  it('maps OpenAI-style chat messages into Gemini request payloads', async () => {
    const engine = new GeminiEngine({
      apiKey: 'mock-api-key',
      model: 'gemini-1.5-flash',
      baseURL: 'http://127.0.0.1:8080/v1',
      maxTokensOutput: 256,
      maxTokensInput: 4096
    });

    const generateContent = jest.fn().mockResolvedValue({
      response: {
        text: () => 'feat(gemini): translate the diff<think>hidden</think>'
      }
    });
    const getGenerativeModel = jest.fn().mockReturnValue({
      generateContent
    });
    engine.client = {
      getGenerativeModel
    } as any;

    const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> = [
      { role: 'system', content: 'system message' },
      { role: 'assistant', content: 'assistant guidance' },
      { role: 'user', content: 'diff --git a/file b/file' }
    ];

    const result = await engine.generateCommitMessage(messages);

    expect(result).toEqual('feat(gemini): translate the diff');
    expect(getGenerativeModel).toHaveBeenCalledWith(
      {
        model: 'gemini-1.5-flash',
        systemInstruction: 'system message'
      },
      {
        baseUrl: 'http://127.0.0.1:8080/v1'
      }
    );
    expect(generateContent).toHaveBeenCalledWith(
      expect.objectContaining({
        contents: [
          {
            parts: [{ text: 'assistant guidance' }],
            role: 'model'
          },
          {
            parts: [{ text: 'diff --git a/file b/file' }],
            role: 'user'
          }
        ],
        generationConfig: expect.objectContaining({
          maxOutputTokens: 256,
          temperature: 0,
          topP: 0.1
        })
      })
    );
  });
});


@@ -1,26 +1,71 @@
import { OpenAI } from 'openai';
import { OpenAiEngine } from '../../src/engine/openAi';

describe('OpenAiEngine', () => {
  const baseConfig = {
    apiKey: 'test-openai-key',
    maxTokensInput: 4096,
    maxTokensOutput: 256
  };

  const messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam> = [
    { role: 'system', content: 'system message' },
    { role: 'user', content: 'diff --git a/file b/file' }
  ];

  it('uses max_completion_tokens for reasoning models', async () => {
    const engine = new OpenAiEngine({
      ...baseConfig,
      model: 'o3-mini'
    });
    const create = jest
      .spyOn(engine.client.chat.completions, 'create')
      .mockResolvedValue({
        choices: [{ message: { content: 'feat(openai): reasoning path' } }]
      } as any);

    await engine.generateCommitMessage(messages);

    expect(create).toHaveBeenCalledWith(
      expect.objectContaining({
        model: 'o3-mini',
        max_completion_tokens: 256
      })
    );
    expect(create).toHaveBeenCalledWith(
      expect.not.objectContaining({
        max_tokens: expect.anything()
      })
    );
  });

  it('uses max_tokens and sampling params for non-reasoning models', async () => {
    const engine = new OpenAiEngine({
      ...baseConfig,
      model: 'gpt-4o-mini'
    });
    const create = jest
      .spyOn(engine.client.chat.completions, 'create')
      .mockResolvedValue({
        choices: [{ message: { content: 'feat(openai): standard path' } }]
      } as any);

    await engine.generateCommitMessage(messages);

    expect(create).toHaveBeenCalledWith(
      expect.objectContaining({
        model: 'gpt-4o-mini',
        max_tokens: 256,
        temperature: 0,
        top_p: 0.1
      })
    );
    expect(create).toHaveBeenCalledWith(
      expect.not.objectContaining({
        max_completion_tokens: expect.anything()
      })
    );
  });
});

test/unit/proxy.test.ts Normal file

@@ -0,0 +1,126 @@
import axios from 'axios';
import { getGlobalDispatcher } from 'undici';
import { AnthropicEngine } from '../../src/engine/anthropic';
import { OpenAiEngine } from '../../src/engine/openAi';
import { resolveProxy, setupProxy } from '../../src/utils/proxy';

describe('proxy utilities', () => {
  const originalEnv = { ...process.env };
  const originalAxiosProxy = axios.defaults.proxy;
  const originalAxiosHttpAgent = axios.defaults.httpAgent;
  const originalAxiosHttpsAgent = axios.defaults.httpsAgent;

  function resetEnv(env: NodeJS.ProcessEnv) {
    Object.keys(process.env).forEach((key) => {
      if (!(key in env)) {
        delete process.env[key];
      } else {
        process.env[key] = env[key];
      }
    });
  }

  beforeEach(() => {
    resetEnv(originalEnv);
    setupProxy(undefined);
  });

  afterEach(() => {
    resetEnv(originalEnv);
    setupProxy(undefined);
    axios.defaults.proxy = originalAxiosProxy;
    axios.defaults.httpAgent = originalAxiosHttpAgent;
    axios.defaults.httpsAgent = originalAxiosHttpsAgent;
  });

  it('should prefer an explicit proxy URL over ambient proxy env vars', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    expect(resolveProxy('http://explicit-proxy:8080')).toEqual(
      'http://explicit-proxy:8080'
    );
  });

  it('should return null when proxy is explicitly disabled', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    expect(resolveProxy(null)).toEqual(null);
  });

  it('should fall back to ambient proxy env vars when proxy is unset', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    expect(resolveProxy(undefined)).toEqual('http://ambient-proxy:8080');
  });

  it('should disable proxy usage when setupProxy receives null', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    setupProxy(null);
    expect(getGlobalDispatcher().constructor.name).toEqual('Agent');
    expect(axios.defaults.proxy).toEqual(false);
    expect(axios.defaults.httpAgent).toBeUndefined();
    expect(axios.defaults.httpsAgent).toBeUndefined();
  });

  it('should install proxy agents when setupProxy receives a proxy URL', () => {
    setupProxy('http://127.0.0.1:7890');
    expect(getGlobalDispatcher().constructor.name).toEqual('ProxyAgent');
    expect(axios.defaults.proxy).toEqual(false);
    expect(axios.defaults.httpAgent).toBeDefined();
    expect(axios.defaults.httpsAgent).toBeDefined();
  });
});

describe('engine proxy handling', () => {
  const originalEnv = { ...process.env };
  const baseConfig = {
    apiKey: 'test-key',
    model: 'gpt-4o-mini',
    maxTokensInput: 4096,
    maxTokensOutput: 256
  };

  function resetEnv(env: NodeJS.ProcessEnv) {
    Object.keys(process.env).forEach((key) => {
      if (!(key in env)) {
        delete process.env[key];
      } else {
        process.env[key] = env[key];
      }
    });
  }

  beforeEach(() => {
    resetEnv(originalEnv);
  });

  afterEach(() => {
    resetEnv(originalEnv);
  });

  it('should not let OpenAI engine re-read proxy env vars when proxy is unset', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    const engine = new OpenAiEngine({
      ...baseConfig,
      proxy: undefined
    });
    expect(engine.client.httpAgent).toBeUndefined();
  });

  it('should not let Anthropic engine re-read proxy env vars when proxy is unset', () => {
    process.env.HTTPS_PROXY = 'http://ambient-proxy:8080';
    const engine = new AnthropicEngine({
      ...baseConfig,
      model: 'claude-sonnet-4-20250514',
      proxy: undefined
    });
    expect(engine.client.httpAgent).toBeUndefined();
  });
});
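
For reference, a hedged sketch of the wiring these setupProxy assertions imply: route undici/fetch traffic through a global dispatcher and give axios explicit agents, pinning axios.defaults.proxy to false so axios never re-reads the environment on its own. The https-proxy-agent dependency and the exact reset path are assumptions; the real src/utils/proxy.ts may differ:

// Sketch only; pairs with the resolveProxy sketch near the top of this page.
import axios from 'axios';
import { Agent, ProxyAgent, setGlobalDispatcher } from 'undici';
import { HttpsProxyAgent } from 'https-proxy-agent';

function setupProxy(proxy: string | null | undefined): void {
  const url = resolveProxy(proxy);
  if (!url) {
    // No proxy (disabled or unavailable): restore a plain undici Agent
    // and strip any axios proxy configuration.
    setGlobalDispatcher(new Agent());
    axios.defaults.proxy = false;
    axios.defaults.httpAgent = undefined;
    axios.defaults.httpsAgent = undefined;
    return;
  }
  // Proxy resolved: fetch-based clients go through undici's ProxyAgent,
  // axios through explicit agents with env auto-detection disabled.
  setGlobalDispatcher(new ProxyAgent(url));
  axios.defaults.proxy = false;
  axios.defaults.httpAgent = new HttpsProxyAgent(url);
  axios.defaults.httpsAgent = new HttpsProxyAgent(url);
}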