Mirror of https://github.com/di-sukharev/opencommit.git, synced 2026-04-20 03:02:51 -04:00.
* refactor(api.ts): remove console.log statement
* fix(generateCommitMessageFromGitDiff.ts): increase MAX_REQ_TOKENS to 3900 to avoid exceeding OpenAI's limit
This commit is contained in:
@@ -43,8 +43,6 @@ class OpenAi {
|
||||
public generateCommitMessage = async (
|
||||
messages: Array<ChatCompletionRequestMessage>
|
||||
): Promise<string | undefined> => {
|
||||
console.log({ messages });
|
||||
|
||||
try {
|
||||
const { data } = await this.openAI.createChatCompletion({
|
||||
model: 'gpt-3.5-turbo',
|
||||
@@ -58,7 +56,7 @@ class OpenAi {
|
||||
|
||||
return message?.content;
|
||||
} catch (error) {
|
||||
console.error('openAI api error', { error });
|
||||
// console.error('openAI api error', { error });
|
||||
throw error;
|
||||
}
|
||||
};
|
||||
|
||||
@@ -82,7 +82,7 @@ const INIT_MESSAGES_PROMPT_LENGTH = INIT_MESSAGES_PROMPT.map(
|
||||
(msg) => msg.content
|
||||
).join('').length;
|
||||
|
||||
const MAX_REQ_TOKENS = 1000 - INIT_MESSAGES_PROMPT_LENGTH;
|
||||
const MAX_REQ_TOKENS = 3900 - INIT_MESSAGES_PROMPT_LENGTH;
|
||||
|
||||
export const generateCommitMessageWithChatCompletion = async (
|
||||
diff: string
|
||||
|
||||
Reference in New Issue
Block a user