Merge remote-tracking branch 'upstream/master'

Barnabas Busa
2026-04-08 14:43:22 +02:00
30 changed files with 18605 additions and 90738 deletions

View File

@@ -5,9 +5,10 @@ import { cli } from 'cleye';
import packageJSON from '../package.json';
import { commit } from './commands/commit';
import { commitlintConfigCommand } from './commands/commitlint';
import { configCommand } from './commands/config';
import { configCommand, getConfig } from './commands/config';
import { hookCommand, isHookCalled } from './commands/githook.js';
import { prepareCommitMessageHook } from './commands/prepare-commit-msg-hook';
import { setupProxy } from './utils/proxy';
import {
setupCommand,
isFirstRun,
@@ -18,13 +19,22 @@ import { modelsCommand } from './commands/models';
import { checkIsLatestVersion } from './utils/checkIsLatestVersion';
import { runMigrations } from './migrations/_run.js';
const config = getConfig();
setupProxy(config.OCO_PROXY);
const extraArgs = process.argv.slice(2);
cli(
{
version: packageJSON.version,
name: 'opencommit',
commands: [configCommand, hookCommand, commitlintConfigCommand, setupCommand, modelsCommand],
commands: [
configCommand,
hookCommand,
commitlintConfigCommand,
setupCommand,
modelsCommand
],
flags: {
fgm: {
type: Boolean,
@@ -48,28 +58,29 @@ cli(
help: { description: packageJSON.description }
},
async ({ flags }) => {
if (await isHookCalled()) {
await prepareCommitMessageHook();
return;
}
await runMigrations();
await checkIsLatestVersion();
if (await isHookCalled()) {
prepareCommitMessageHook();
} else {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
// Check for first run and trigger setup wizard
if (isFirstRun()) {
const setupComplete = await runSetup();
if (!setupComplete) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}
// Check for missing API key and prompt if needed
const hasApiKey = await promptForMissingApiKey();
if (!hasApiKey) {
process.exit(1);
}
commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
},
extraArgs
);
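
Reconstructed from the interleaved hunk above, the callback now returns early when invoked as the prepare-commit-msg hook, so migrations, the version check, and the setup wizard run only in normal mode. A condensed sketch of the new flow (not verbatim from the diff):

async ({ flags }) => {
  if (await isHookCalled()) {
    await prepareCommitMessageHook(); // hook mode: skip migrations, version check, setup
    return;
  }

  await runMigrations();
  await checkIsLatestVersion();

  if (isFirstRun() && !(await runSetup())) process.exit(1);
  if (!(await promptForMissingApiKey())) process.exit(1);

  commit(extraArgs, flags.context, false, flags.fgm, flags.yes);
}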

View File

@@ -11,10 +11,7 @@ import {
import chalk from 'chalk';
import { execa } from 'execa';
import { generateCommitMessageByDiff } from '../generateCommitMessageFromGitDiff';
import {
formatUserFriendlyError,
printFormattedError
} from '../utils/errors';
import { formatUserFriendlyError, printFormattedError } from '../utils/errors';
import {
assertGitRepo,
getChangedFiles,

View File

@@ -25,6 +25,7 @@ export enum CONFIG_KEYS {
OCO_ONE_LINE_COMMIT = 'OCO_ONE_LINE_COMMIT',
OCO_TEST_MOCK_TYPE = 'OCO_TEST_MOCK_TYPE',
OCO_API_URL = 'OCO_API_URL',
OCO_PROXY = 'OCO_PROXY',
OCO_API_CUSTOM_HEADERS = 'OCO_API_CUSTOM_HEADERS',
OCO_OMIT_SCOPE = 'OCO_OMIT_SCOPE',
OCO_GITPUSH = 'OCO_GITPUSH', // todo: deprecate
@@ -727,6 +728,15 @@ export const configValidators = {
return value;
},
[CONFIG_KEYS.OCO_PROXY](value: any) {
validateConfig(
CONFIG_KEYS.OCO_PROXY,
typeof value === 'string' && /^https?:\/\//.test(value),
`${value} is not a valid URL. It should start with 'http://' or 'https://'.`
);
return value;
},
[CONFIG_KEYS.OCO_MODEL](value: any, config: any = {}) {
validateConfig(
CONFIG_KEYS.OCO_MODEL,
@@ -849,7 +859,8 @@ export enum OCO_AI_PROVIDER_ENUM {
export const PROVIDER_API_KEY_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/api-keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
'https://console.anthropic.com/settings/keys',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/apikey',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/keys',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/api-keys/',
@@ -872,13 +883,14 @@ export const RECOMMENDED_MODELS: Record<string, string> = {
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'deepseek-chat',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'openai/gpt-4o-mini',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'gpt-4o-mini'
}
};
export type ConfigType = {
[CONFIG_KEYS.OCO_API_KEY]?: string;
[CONFIG_KEYS.OCO_TOKENS_MAX_INPUT]: number;
[CONFIG_KEYS.OCO_TOKENS_MAX_OUTPUT]: number;
[CONFIG_KEYS.OCO_API_URL]?: string;
[CONFIG_KEYS.OCO_PROXY]?: string;
[CONFIG_KEYS.OCO_API_CUSTOM_HEADERS]?: string;
[CONFIG_KEYS.OCO_DESCRIPTION]: boolean;
[CONFIG_KEYS.OCO_EMOJI]: boolean;
@@ -963,6 +975,10 @@ const getEnvConfig = (envPath: string) => {
return {
OCO_MODEL: process.env.OCO_MODEL,
OCO_API_URL: process.env.OCO_API_URL,
OCO_PROXY:
process.env.OCO_PROXY ||
process.env.HTTPS_PROXY ||
process.env.HTTP_PROXY,
OCO_API_KEY: process.env.OCO_API_KEY,
OCO_API_CUSTOM_HEADERS: process.env.OCO_API_CUSTOM_HEADERS,
OCO_AI_PROVIDER: process.env.OCO_AI_PROVIDER as OCO_AI_PROVIDER_ENUM,
@@ -1188,6 +1204,11 @@ function getConfigKeyDetails(key) {
'Custom API URL - may be used to set proxy path to OpenAI API',
values: ["URL string (must start with 'http://' or 'https://')"]
};
case CONFIG_KEYS.OCO_PROXY:
return {
description: 'HTTP/HTTPS Proxy URL',
values: ["URL string (must start with 'http://' or 'https://')"]
};
case CONFIG_KEYS.OCO_MESSAGE_TEMPLATE_PLACEHOLDER:
return {
description: 'Message template placeholder',
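
The getEnvConfig hunk above gives an explicit OCO_PROXY precedence over the conventional proxy variables. A minimal sketch of the resolution order, with hypothetical values in the comments:

// Persistent:      oco config set OCO_PROXY=http://127.0.0.1:8080
// Per invocation:  HTTPS_PROXY=http://127.0.0.1:8080 oco
const proxy =
  process.env.OCO_PROXY ||   // explicit opencommit setting wins
  process.env.HTTPS_PROXY || // then the conventional HTTPS proxy variable
  process.env.HTTP_PROXY;    // then HTTP; undefined if none are set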

View File

@@ -2,11 +2,7 @@ import { intro, outro, spinner } from '@clack/prompts';
import chalk from 'chalk';
import { command } from 'cleye';
import { COMMANDS } from './ENUMS';
import {
MODEL_LIST,
OCO_AI_PROVIDER_ENUM,
getConfig
} from './config';
import { MODEL_LIST, OCO_AI_PROVIDER_ENUM, getConfig } from './config';
import {
fetchModelsForProvider,
clearModelCache,
@@ -31,7 +27,10 @@ function formatCacheAge(timestamp: number | null): string {
return 'just now';
}
async function listModels(provider: string, useCache: boolean = true): Promise<void> {
async function listModels(
provider: string,
useCache: boolean = true
): Promise<void> {
const config = getConfig();
const apiKey = config.OCO_API_KEY;
const currentModel = config.OCO_MODEL;
@@ -52,7 +51,9 @@ async function listModels(provider: string, useCache: boolean = true): Promise<void> {
models = MODEL_LIST[providerKey] || [];
}
console.log(`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`);
console.log(
`\n${chalk.bold('Available models for')} ${chalk.cyan(provider)}:\n`
);
if (models.length === 0) {
console.log(chalk.dim(' No models found'));
@@ -79,14 +80,23 @@ async function refreshModels(provider: string): Promise<void> {
clearModelCache();
try {
const models = await fetchModelsForProvider(provider, apiKey, undefined, true);
const models = await fetchModelsForProvider(
provider,
apiKey,
undefined,
true
);
loadingSpinner.stop(`${chalk.green('+')} Fetched ${models.length} models`);
// List the models
await listModels(provider, true);
} catch (error) {
loadingSpinner.stop(chalk.red('Failed to fetch models'));
console.error(chalk.red(`Error: ${error instanceof Error ? error.message : 'Unknown error'}`));
console.error(
chalk.red(
`Error: ${error instanceof Error ? error.message : 'Unknown error'}`
)
);
}
}
@@ -112,7 +122,8 @@ export const modelsCommand = command(
},
async ({ flags }) => {
const config = getConfig();
const provider = flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
const provider =
flags.provider || config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
intro(chalk.bgCyan(' OpenCommit Models '));
@@ -120,7 +131,9 @@ export const modelsCommand = command(
const cacheInfo = getCacheInfo();
if (cacheInfo.timestamp) {
console.log(
chalk.dim(` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`)
chalk.dim(
` Cache last updated: ${formatCacheAge(cacheInfo.timestamp)}`
)
);
if (cacheInfo.providers.length > 0) {
console.log(
@@ -137,8 +150,6 @@ export const modelsCommand = command(
await listModels(provider);
}
outro(
`Run ${chalk.cyan('oco models --refresh')} to update the model list`
);
outro(`Run ${chalk.cyan('oco models --refresh')} to update the model list`);
}
);

View File

@@ -52,6 +52,12 @@ const OTHER_PROVIDERS = [
];
const NO_API_KEY_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX,
OCO_AI_PROVIDER_ENUM.TEST
];
const MODEL_REQUIRED_PROVIDERS = [
OCO_AI_PROVIDER_ENUM.OLLAMA,
OCO_AI_PROVIDER_ENUM.MLX
];
@@ -90,7 +96,8 @@ async function selectProvider(): Promise<string | symbol> {
}
async function getApiKey(provider: string): Promise<string | symbol> {
const url = PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
const url =
PROVIDER_API_KEY_URLS[provider as keyof typeof PROVIDER_API_KEY_URLS];
let message = `Enter your ${provider} API key:`;
if (url) {
@@ -127,7 +134,8 @@ async function selectModel(
provider: string,
apiKey?: string
): Promise<string | symbol> {
const providerDisplayName = PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const providerDisplayName =
PROVIDER_DISPLAY_NAMES[provider]?.split(' (')[0] || provider;
const loadingSpinner = spinner();
loadingSpinner.start(`Fetching models from ${providerDisplayName}...`);
@@ -178,7 +186,8 @@ async function selectModel(
}
// Get recommended model for this provider
const recommended = RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
const recommended =
RECOMMENDED_MODELS[provider as keyof typeof RECOMMENDED_MODELS];
// Build options with recommended first
const options: Array<{ value: string; label: string }> = [];
@@ -191,9 +200,7 @@ async function selectModel(
}
// Add other models (first 10, excluding recommended)
const otherModels = models
.filter((m) => m !== recommended)
.slice(0, 10);
const otherModels = models.filter((m) => m !== recommended).slice(0, 10);
otherModels.forEach((model) => {
options.push({ value: model, label: model });
@@ -409,27 +416,31 @@ export async function runSetup(): Promise<boolean> {
setGlobalConfig(newConfig as any);
outro(
`${chalk.green('✔')} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan('oco')} to generate commit messages!`
`${chalk.green(
'✔'
)} Configuration saved to ~/.opencommit\n\n Run ${chalk.cyan(
'oco'
)} to generate commit messages!`
);
return true;
}
export function isFirstRun(): boolean {
if (!getIsGlobalConfigFileExist()) {
return true;
}
const config = getConfig();
// Check if API key is missing for providers that need it
const provider = config.OCO_AI_PROVIDER || OCO_AI_PROVIDER_ENUM.OPENAI;
if (NO_API_KEY_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
if (MODEL_REQUIRED_PROVIDERS.includes(provider as OCO_AI_PROVIDER_ENUM)) {
// For Ollama/MLX, check if model is set
return !config.OCO_MODEL;
}
if (provider === OCO_AI_PROVIDER_ENUM.TEST) {
return false;
}
// For other providers, check if API key is set
return !config.OCO_API_KEY;
}
@@ -447,9 +458,7 @@ export async function promptForMissingApiKey(): Promise<boolean> {
}
console.log(
chalk.yellow(
`\nAPI key missing for ${provider}. Let's set it up.\n`
)
chalk.yellow(`\nAPI key missing for ${provider}. Let's set it up.\n`)
);
const apiKey = await getApiKey(provider);

View File

@@ -11,6 +11,7 @@ export interface AiEngineConfig {
maxTokensOutput: number;
maxTokensInput: number;
baseURL?: string;
proxy?: string;
customHeaders?: Record<string, string>;
}

View File

@@ -1,4 +1,5 @@
import AnthropicClient from '@anthropic-ai/sdk';
import { HttpsProxyAgent } from 'https-proxy-agent';
import {
MessageCreateParamsNonStreaming,
MessageParam
@@ -18,7 +19,15 @@ export class AnthropicEngine implements AiEngine {
constructor(config) {
this.config = config;
this.client = new AnthropicClient({ apiKey: this.config.apiKey });
const clientOptions: any = { apiKey: this.config.apiKey };
const proxy =
config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
}
this.client = new AnthropicClient(clientOptions);
}
public generateCommitMessage = async (
@@ -35,9 +44,14 @@ export class AnthropicEngine implements AiEngine {
system: systemMessage,
messages: restMessages,
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
};
// add top_p only for non-4.5 models: claude-*-4-5 models reject top_p when temperature is also set
if (!/claude.*-4-5/.test(params.model)) {
params.top_p = 0.1;
}
try {
const REQUEST_TOKENS = messages
.map((msg) => tokenCount(msg.content as string) + 4)
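
The guard above keys off the model ID; a quick check of what the regex matches (model IDs illustrative):

const isClaude45 = (model: string) => /claude.*-4-5/.test(model);
isClaude45('claude-sonnet-4-5');          // true  -> top_p omitted
isClaude45('claude-sonnet-4-5-20250929'); // true  -> top_p omitted
isClaude45('claude-3-5-sonnet-latest');   // false -> top_p: 0.1 added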

View File

@@ -10,9 +10,10 @@ export interface DeepseekConfig extends OpenAiConfig {}
export class DeepseekEngine extends OpenAiEngine {
constructor(config: DeepseekConfig) {
// Call OpenAIEngine constructor with forced Deepseek baseURL
// Put baseURL first so user config can override it
super({
...config,
baseURL: 'https://api.deepseek.com/v1'
baseURL: 'https://api.deepseek.com/v1',
...config
});
}
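
The reorder matters because later spread properties override earlier ones. A minimal illustration (config values hypothetical):

const config = { apiKey: 'sk-test', baseURL: 'https://my-gateway.example/v1' };

// New order: the Deepseek default comes first, so a user-supplied baseURL wins.
const merged = { baseURL: 'https://api.deepseek.com/v1', ...config };
// merged.baseURL === 'https://my-gateway.example/v1'

// The old order ({ ...config, baseURL: '...' }) always forced the default.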

View File

@@ -29,10 +29,15 @@ export class GeminiEngine implements AiEngine {
.map((m) => m.content)
.join('\n');
const gemini = this.client.getGenerativeModel({
model: this.config.model,
systemInstruction
});
const gemini = this.client.getGenerativeModel(
{
model: this.config.model,
systemInstruction
},
{
baseUrl: this.config.baseURL
}
);
const contents = messages
.filter((m) => m.role !== 'system')
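
The second argument is the SDK's per-model RequestOptions; if I read @google/generative-ai correctly, its baseUrl field redirects requests to a custom endpoint, which is what wires OCO_API_URL through to Gemini. A standalone sketch (key and endpoint hypothetical):

import { GoogleGenerativeAI } from '@google/generative-ai';

const client = new GoogleGenerativeAI('GEMINI_API_KEY');
const gemini = client.getGenerativeModel(
  { model: 'gemini-1.5-flash' },
  { baseUrl: 'https://gemini-proxy.example.com' } // omitted -> Google's default host
);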

View File

@@ -1,4 +1,5 @@
import { OpenAI } from 'openai';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { normalizeEngineError } from '../utils/engineErrorHandler';
import { removeContentTags } from '../utils/removeContentTags';

View File

@@ -6,16 +6,21 @@ import { AiEngine, AiEngineConfig } from './Engine';
interface MLXConfig extends AiEngineConfig {}
const DEFAULT_MLX_URL = 'http://localhost:8080';
const MLX_CHAT_PATH = '/v1/chat/completions';
export class MLXEngine implements AiEngine {
config: MLXConfig;
client: AxiosInstance;
private chatUrl: string;
constructor(config) {
this.config = config;
const baseUrl = config.baseURL || DEFAULT_MLX_URL;
this.chatUrl = `${baseUrl}${MLX_CHAT_PATH}`;
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:8080/v1/chat/completions',
headers: { 'Content-Type': 'application/json' }
});
}
@@ -31,10 +36,7 @@ export class MLXEngine implements AiEngine {
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const response = await this.client.post(this.chatUrl, params);
const choices = response.data.choices;
const message = choices[0].message;

View File

@@ -6,25 +6,27 @@ import { AiEngine, AiEngineConfig } from './Engine';
interface OllamaConfig extends AiEngineConfig {}
const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
const OLLAMA_CHAT_PATH = '/api/chat';
export class OllamaEngine implements AiEngine {
config: OllamaConfig;
client: AxiosInstance;
private chatUrl: string;
constructor(config) {
this.config = config;
const baseUrl = config.baseURL || DEFAULT_OLLAMA_URL;
this.chatUrl = `${baseUrl}${OLLAMA_CHAT_PATH}`;
// Combine base headers with custom headers
const headers = {
'Content-Type': 'application/json',
...config.customHeaders
};
this.client = axios.create({
url: config.baseURL
? `${config.baseURL}/${config.apiKey}`
: 'http://localhost:11434/api/chat',
headers
});
this.client = axios.create({ headers });
}
async generateCommitMessage(
@@ -37,10 +39,7 @@ export class OllamaEngine implements AiEngine {
stream: false
};
try {
const response = await this.client.post(
this.client.getUri(this.config),
params
);
const response = await this.client.post(this.chatUrl, params);
const { message } = response.data;
let content = message?.content;
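
This mirrors the MLXEngine change above: the old code smuggled a url into axios.create() and resolved it with client.getUri(this.config), appending the apiKey as a path segment along the way; the refactor computes the endpoint once. A sketch of the shared resolution logic (host hypothetical):

const DEFAULT_OLLAMA_URL = 'http://localhost:11434';
const OLLAMA_CHAT_PATH = '/api/chat';

const resolveChatUrl = (baseURL?: string): string =>
  `${baseURL || DEFAULT_OLLAMA_URL}${OLLAMA_CHAT_PATH}`;

resolveChatUrl();                       // 'http://localhost:11434/api/chat'
resolveChatUrl('http://gpu-box:11434'); // 'http://gpu-box:11434/api/chat'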

View File

@@ -1,4 +1,5 @@
import { OpenAI } from 'openai';
import { HttpsProxyAgent } from 'https-proxy-agent';
import { GenerateCommitMessageErrorEnum } from '../generateCommitMessageFromGitDiff';
import { parseCustomHeaders } from '../utils/engine';
import { normalizeEngineError } from '../utils/engineErrorHandler';
@@ -23,6 +24,12 @@ export class OpenAiEngine implements AiEngine {
clientOptions.baseURL = config.baseURL;
}
const proxy =
config.proxy || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
clientOptions.httpAgent = new HttpsProxyAgent(proxy);
}
if (config.customHeaders) {
const headers = parseCustomHeaders(config.customHeaders);
if (Object.keys(headers).length > 0) {
@@ -36,12 +43,18 @@ export class OpenAiEngine implements AiEngine {
public generateCommitMessage = async (
messages: Array<OpenAI.Chat.Completions.ChatCompletionMessageParam>
): Promise<string | null> => {
const isReasoningModel = /^(o[1-9]|gpt-5)/.test(this.config.model);
const params = {
model: this.config.model,
messages,
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
...(isReasoningModel
? { max_completion_tokens: this.config.maxTokensOutput }
: {
temperature: 0,
top_p: 0.1,
max_tokens: this.config.maxTokensOutput
})
};
try {
@@ -55,7 +68,9 @@ export class OpenAiEngine implements AiEngine {
)
throw new Error(GenerateCommitMessageErrorEnum.tooMuchTokens);
const completion = await this.client.chat.completions.create(params);
const completion = await this.client.chat.completions.create(
params as OpenAI.Chat.Completions.ChatCompletionCreateParamsNonStreaming
);
const message = completion.choices[0].message;
let content = message?.content;
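
The branch above reflects that OpenAI's reasoning models (the o-series and the gpt-5 family) take max_completion_tokens rather than max_tokens and reject non-default temperature/top_p. A sketch of the two request shapes it produces (model names illustrative):

const paramsFor = (model: string, maxTokensOutput: number) => ({
  model,
  ...(/^(o[1-9]|gpt-5)/.test(model)
    ? { max_completion_tokens: maxTokensOutput } // reasoning models
    : { temperature: 0, top_p: 0.1, max_tokens: maxTokensOutput })
});

paramsFor('o3-mini', 500);     // { model, max_completion_tokens: 500 }
paramsFor('gpt-4o-mini', 500); // { model, temperature: 0, top_p: 0.1, max_tokens: 500 }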

View File

@@ -55,9 +55,7 @@ async function handleModelNotFoundError(
provider: string,
currentModel: string
): Promise<string | null> {
console.log(
chalk.red(`\n✖ Model '${currentModel}' not found\n`)
);
console.log(chalk.red(`\n✖ Model '${currentModel}' not found\n`));
const suggestedModels = getSuggestedModels(provider, currentModel);
const recommended =

View File

@@ -47,6 +47,7 @@ export function getEngine(): AiEngine {
maxTokensOutput: config.OCO_TOKENS_MAX_OUTPUT!,
maxTokensInput: config.OCO_TOKENS_MAX_INPUT!,
baseURL: config.OCO_API_URL!,
proxy: config.OCO_PROXY!,
apiKey: config.OCO_API_KEY!,
customHeaders
};

View File

@@ -3,15 +3,18 @@ import { MODEL_LIST, OCO_AI_PROVIDER_ENUM } from '../commands/config';
// Provider billing/help URLs for common errors
export const PROVIDER_BILLING_URLS: Record<string, string | null> = {
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]: 'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]: 'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.ANTHROPIC]:
'https://console.anthropic.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.OPENAI]:
'https://platform.openai.com/settings/organization/billing',
[OCO_AI_PROVIDER_ENUM.GEMINI]: 'https://aistudio.google.com/app/plan',
[OCO_AI_PROVIDER_ENUM.GROQ]: 'https://console.groq.com/settings/billing',
[OCO_AI_PROVIDER_ENUM.MISTRAL]: 'https://console.mistral.ai/billing/',
[OCO_AI_PROVIDER_ENUM.DEEPSEEK]: 'https://platform.deepseek.com/usage',
[OCO_AI_PROVIDER_ENUM.OPENROUTER]: 'https://openrouter.ai/credits',
[OCO_AI_PROVIDER_ENUM.AIMLAPI]: 'https://aimlapi.com/app/billing',
[OCO_AI_PROVIDER_ENUM.AZURE]: 'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.AZURE]:
'https://portal.azure.com/#view/Microsoft_Azure_CostManagement',
[OCO_AI_PROVIDER_ENUM.OLLAMA]: null,
[OCO_AI_PROVIDER_ENUM.MLX]: null,
[OCO_AI_PROVIDER_ENUM.FLOWISE]: null,
@@ -23,7 +26,9 @@ export class InsufficientCreditsError extends Error {
public readonly provider: string;
constructor(provider: string, message?: string) {
super(message || `Insufficient credits or quota for provider '${provider}'`);
super(
message || `Insufficient credits or quota for provider '${provider}'`
);
this.name = 'InsufficientCreditsError';
this.provider = provider;
}
@@ -345,7 +350,10 @@ export interface FormattedError {
}
// Format an error into a user-friendly structure
export function formatUserFriendlyError(error: unknown, provider: string): FormattedError {
export function formatUserFriendlyError(
error: unknown,
provider: string
): FormattedError {
const billingUrl = PROVIDER_BILLING_URLS[provider] || null;
// Handle our custom error types first
@@ -460,7 +468,9 @@ export function printFormattedError(formatted: FormattedError): string {
output += ` ${formatted.message}\n`;
if (formatted.helpUrl) {
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(formatted.helpUrl)}\n`;
output += `\n ${chalk.cyan('Help:')} ${chalk.underline(
formatted.helpUrl
)}\n`;
}
if (formatted.suggestion) {

View File

@@ -125,9 +125,7 @@ export async function fetchMistralModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.mistral;
} catch {
@@ -148,9 +146,7 @@ export async function fetchGroqModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.groq;
} catch {
@@ -173,8 +169,9 @@ export async function fetchOpenRouterModels(apiKey: string): Promise<string[]> {
const data = await response.json();
// Filter to text-capable models only (exclude image/audio models)
const models = data.data
?.filter((m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
?.filter(
(m: { id: string; context_length?: number }) =>
m.context_length && m.context_length > 0
)
.map((m: { id: string }) => m.id)
.sort();
@@ -198,9 +195,7 @@ export async function fetchDeepSeekModels(apiKey: string): Promise<string[]> {
}
const data = await response.json();
const models = data.data
?.map((m: { id: string }) => m.id)
.sort();
const models = data.data?.map((m: { id: string }) => m.id).sort();
return models && models.length > 0 ? models : MODEL_LIST.deepseek;
} catch {
@@ -312,7 +307,10 @@ export function clearModelCache(): void {
}
}
export function getCacheInfo(): { timestamp: number | null; providers: string[] } {
export function getCacheInfo(): {
timestamp: number | null;
providers: string[];
} {
const cache = readCache();
if (!cache) {
return { timestamp: null, providers: [] };

src/utils/proxy.ts Normal file (+21 lines)
View File

@@ -0,0 +1,21 @@
import { setGlobalDispatcher, ProxyAgent } from 'undici';
import axios from 'axios';
import { HttpsProxyAgent } from 'https-proxy-agent';
export function setupProxy(proxyUrl?: string) {
const proxy = proxyUrl || process.env.HTTPS_PROXY || process.env.HTTP_PROXY;
if (proxy) {
try {
// Set global dispatcher for undici (affects globalThis.fetch used by Gemini and others)
const dispatcher = new ProxyAgent(proxy);
setGlobalDispatcher(dispatcher);
// Set axios global agent
const agent = new HttpsProxyAgent(proxy);
axios.defaults.httpsAgent = agent;
axios.defaults.proxy = false; // Disable axios built-in proxy handling to use agent
} catch (error) {
console.warn(`[Proxy Error] Failed to set proxy: ${error instanceof Error ? error.message : String(error)}`);
}
}
}
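
Combined with the setupProxy(config.OCO_PROXY) call at CLI startup, this routes both undici-based fetch (used by the Gemini SDK) and global axios (Ollama/MLX) through a single agent; the OpenAI and Anthropic engines attach their own HttpsProxyAgent separately. A minimal usage sketch (proxy address hypothetical):

import axios from 'axios';
import { setupProxy } from './utils/proxy';

// Route traffic through a local debugging proxy such as mitmproxy.
setupProxy('http://127.0.0.1:8080');

// Both calls now traverse the proxy without per-request configuration:
await fetch('https://api.openai.com/v1/models');        // undici global dispatcher
await axios.get('https://api.anthropic.com/v1/models'); // axios.defaults.httpsAgent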