Adds OpenAI o-1 models to frontend (#3868)

Co-authored-by: Engel Nyst <enyst@users.noreply.github.com>
Author: Graham Neubig
Date: 2024-09-13 19:53:24 -04:00
Committed by: GitHub
Parent: cbeae3e612
Commit: 26cc1670ad
6 changed files with 16 additions and 15 deletions

View File

@@ -8,7 +8,7 @@ describe("ModelSelector", () => {
const models = {
openai: {
separator: "/",
models: ["gpt-4o", "gpt-3.5-turbo"],
models: ["gpt-4o", "gpt-4o-mini"],
},
azure: {
separator: "/",

View File

@@ -28,7 +28,7 @@ const renderSettingsForm = (settings?: Settings) => {
SECURITY_ANALYZER: "",
}
}
models={["gpt-4o", "gpt-3.5-turbo", "azure/ada"]}
models={["gpt-4o", "gpt-4o-mini", "azure/ada"]}
agents={["agent1", "agent2", "agent3"]}
securityAnalyzers={["analyzer1", "analyzer2", "analyzer3"]}
onModelChange={onModelChangeMock}
@@ -61,7 +61,7 @@ describe("SettingsForm", () => {
it("should display the existing values if they are present", () => {
renderSettingsForm({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o-mini",
AGENT: "agent2",
LANGUAGE: "es",
LLM_API_KEY: "sk-...",
@@ -77,13 +77,13 @@ describe("SettingsForm", () => {
const languageInput = screen.getByRole("combobox", { name: "language" });
expect(providerInput).toHaveValue("OpenAI");
-expect(modelInput).toHaveValue("gpt-3.5-turbo");
+expect(modelInput).toHaveValue("gpt-4o-mini");
expect(languageInput).toHaveValue("Español");
});
it("should show advanced settings by default if advanced settings are in use", () => {
renderSettingsForm({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o-mini",
AGENT: "agent2",
LANGUAGE: "es",
LLM_API_KEY: "sk-...",
@@ -113,7 +113,7 @@ describe("SettingsForm", () => {
it("should show advanced settings if button is clicked", async () => {
renderSettingsForm({
LLM_MODEL: "gpt-3.5-turbo",
LLM_MODEL: "gpt-4o-mini",
AGENT: "agent2",
LANGUAGE: "es",
LLM_API_KEY: "sk-...",
@@ -144,7 +144,7 @@ describe("SettingsForm", () => {
CONFIRMATION_MODE: false,
SECURITY_ANALYZER: "",
}}
models={["gpt-4o", "gpt-3.5-turbo", "azure/ada"]}
models={["gpt-4o", "gpt-4o-mini", "azure/ada"]}
agents={["agent1", "agent2", "agent3"]}
securityAnalyzers={["analyzer1", "analyzer2", "analyzer3"]}
disabled
@@ -266,7 +266,7 @@ describe("SettingsForm", () => {
CONFIRMATION_MODE: true,
SECURITY_ANALYZER: "analyzer1",
}}
models={["gpt-4o", "gpt-3.5-turbo", "azure/ada"]}
models={["gpt-4o", "gpt-4o-mini", "azure/ada"]}
agents={["agent1", "agent2", "agent3"]}
securityAnalyzers={["analyzer1", "analyzer2", "analyzer3"]}
disabled

View File

@@ -47,7 +47,7 @@ vi.mock("#/services/options", async (importOriginal) => ({
.mockResolvedValue(
Promise.resolve([
"gpt-4o",
"gpt-3.5-turbo",
"gpt-4o-mini",
"azure/ada",
"cohere.command-r-v1:0",
]),
@@ -169,7 +169,7 @@ describe("SettingsModal", () => {
await user.click(openai);
await user.click(modelInput);
-const model3 = screen.getByText("gpt-3.5-turbo");
+const model3 = screen.getByText("gpt-4o-mini");
await user.click(model3);
await user.click(saveButton);

View File

@@ -47,9 +47,9 @@ describe("extractModelAndProvider", () => {
});
it("should add provider for popular models", () => {
expect(extractModelAndProvider("gpt-3.5-turbo")).toEqual({
expect(extractModelAndProvider("gpt-4o-mini")).toEqual({
provider: "openai",
model: "gpt-3.5-turbo",
model: "gpt-4o-mini",
separator: "/",
});
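The assertion above only shows the helper's expected output, so here is a minimal sketch of how an extractModelAndProvider helper could behave: split explicit "provider/model" ids on the separator, and fall back to a verified-model lookup (such as the VERIFIED_OPENAI_MODELS list changed in the last file of this commit) for bare names like "gpt-4o-mini". This is an assumption for illustration, not the repository's actual implementation.

// Hypothetical sketch, not the project's real code.
const VERIFIED_OPENAI_MODELS = ["gpt-4o", "gpt-4o-mini", "o1-mini", "o1-preview"]; // trimmed example list

interface ModelAndProvider {
  provider: string;
  model: string;
  separator: string;
}

function extractModelAndProvider(raw: string): ModelAndProvider {
  // Explicit provider prefix, e.g. "azure/ada".
  if (raw.includes("/")) {
    const [provider, ...rest] = raw.split("/");
    return { provider, model: rest.join("/"), separator: "/" };
  }
  // Bare model id: attribute well-known OpenAI models to the "openai" provider.
  if (VERIFIED_OPENAI_MODELS.includes(raw)) {
    return { provider: "openai", model: raw, separator: "/" };
  }
  // Anything else stays unattributed.
  return { provider: "", model: raw, separator: "" };
}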

View File

@@ -14,7 +14,7 @@ test("organizeModelsAndProviders", () => {
"cloudflare/@cf/mistral/mistral-7b-instruct-v0.1",
"gpt-4o",
"together-ai-21.1b-41b",
"gpt-3.5-turbo",
"gpt-4o-mini",
"claude-3-5-sonnet-20240620",
"claude-3-haiku-20240307",
"claude-2",
@@ -46,7 +46,7 @@ test("organizeModelsAndProviders", () => {
},
openai: {
separator: "/",
models: ["gpt-4o", "gpt-3.5-turbo"],
models: ["gpt-4o", "gpt-4o-mini"],
},
anthropic: {
separator: "/",

View File

@@ -10,7 +10,8 @@ export const VERIFIED_OPENAI_MODELS = [
"gpt-4-turbo",
"gpt-4",
"gpt-4-32k",
"gpt-3.5-turbo",
"o1-mini",
"o1-preview",
];
// LiteLLM does not return the compatible Anthropic models with the provider, so we list them here to set them ourselves
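Listing o1-mini and o1-preview here is presumably what lets bare o1 ids be attributed to the openai provider in the model selector. A small illustration using the hypothetical sketches above, not the repository's real call sites:

// Assuming the grouping logic consults VERIFIED_OPENAI_MODELS as sketched earlier:
const grouped = organizeModelsAndProviders(["o1-mini", "o1-preview", "gpt-4o"]);
// grouped.openai => { separator: "/", models: ["o1-mini", "o1-preview", "gpt-4o"] }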