From 0894ec28c108f4f77cc3490f9b7e266505a7e9f1 Mon Sep 17 00:00:00 2001
From: Kayvan Sylvan
Date: Thu, 12 Feb 2026 16:45:05 -0800
Subject: [PATCH 1/4] chore: update model references to latest versions across docs and code

- Update default summarize model to `claude-sonnet-4-5`
- Replace `gpt-4o` references with `gpt-5.2` throughout docs
- Replace `gpt-4o-mini` references with `gpt-5-mini`
- Add MiniMax-M2.5 and M2.5-lightning to static models list
- Update image generation warning to suggest `gpt-5.2`
- Update OpenAI chat example script to use `gpt-5-mini`
- Update REST API docs and examples with current model names
---
 cmd/generate_changelog/README.md                |  2 +-
 .../internal/changelog/summarize.go             |  2 +-
 docs/notification-config.yaml                   |  2 +-
 docs/rest-api.md                                | 16 ++++++++--------
 internal/plugins/ai/openai/openai.go            |  2 +-
 internal/plugins/ai/openai/openai_image_test.go |  2 +-
 .../ai/openai_compatible/providers_config.go    |  2 ++
 .../plugins/template/Examples/openai-chat.sh    |  2 +-
 internal/server/docs/API_VARIABLES_EXAMPLE.md   |  6 +++---
 9 files changed, 19 insertions(+), 17 deletions(-)

diff --git a/cmd/generate_changelog/README.md b/cmd/generate_changelog/README.md
index 816ad678..c7bc82ab 100644
--- a/cmd/generate_changelog/README.md
+++ b/cmd/generate_changelog/README.md
@@ -246,7 +246,7 @@ Set the model via environment variable:
 ```bash
 export FABRIC_CHANGELOG_SUMMARIZE_MODEL=claude-opus-4
 # or
-export FABRIC_CHANGELOG_SUMMARIZE_MODEL=gpt-4
+export FABRIC_CHANGELOG_SUMMARIZE_MODEL=gpt-5.2
 ```
 
 AI summaries are cached and only regenerated when:
diff --git a/cmd/generate_changelog/internal/changelog/summarize.go b/cmd/generate_changelog/internal/changelog/summarize.go
index ac5ccc85..e1ada377 100644
--- a/cmd/generate_changelog/internal/changelog/summarize.go
+++ b/cmd/generate_changelog/internal/changelog/summarize.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 )
 
-const DefaultSummarizeModel = "claude-sonnet-4-20250514"
+const DefaultSummarizeModel = "claude-sonnet-4-5"
 const MinContentLength = 256 // Minimum content length to consider for summarization
 
 const prompt = `# ROLE
diff --git a/docs/notification-config.yaml b/docs/notification-config.yaml
index a81a0a8c..21557c9e 100644
--- a/docs/notification-config.yaml
+++ b/docs/notification-config.yaml
@@ -16,6 +16,6 @@ notification: true
 # notificationCommand: '/path/to/custom-notification-script.sh "$1" "$2"'
 
 # Other common settings
-model: "gpt-4o"
+model: "gpt-5.2"
 temperature: 0.7
 stream: true
diff --git a/docs/rest-api.md b/docs/rest-api.md
index b6418475..876208de 100644
--- a/docs/rest-api.md
+++ b/docs/rest-api.md
@@ -81,7 +81,7 @@ Stream AI responses using Server-Sent Events (SSE).
 {
   "userInput": "Explain quantum computing",
   "vendor": "openai",
-  "model": "gpt-4o",
+  "model": "gpt-5.2",
   "patternName": "explain",
   "contextName": "",
   "strategyName": "",
@@ -103,7 +103,7 @@ Stream AI responses using Server-Sent Events (SSE).
 | ------- | ---------- | --------- | ------------- |
 | `userInput` | **Yes** | - | Your message or question |
 | `vendor` | **Yes** | - | AI provider: `openai`, `anthropic`, `gemini`, `ollama`, etc. |
-| `model` | **Yes** | - | Model name: `gpt-4o`, `claude-sonnet-4.5`, `gemini-2.0-flash-exp`, etc. |
+| `model` | **Yes** | - | Model name: `gpt-5.2`, `claude-sonnet-4.5`, `gemini-2.0-flash-exp`, etc. |
 | `patternName` | No | `""` | Pattern to apply (from `~/.config/fabric/patterns/`) |
 | `contextName` | No | `""` | Context to prepend (from `~/.config/fabric/contexts/`) |
 | `strategyName` | No | `""` | Strategy to use (from `~/.config/fabric/strategies/`) |
@@ -151,7 +151,7 @@ curl -X POST http://localhost:8080/chat \
     "prompts": [{
       "userInput": "What is Fabric?",
       "vendor": "openai",
-      "model": "gpt-4o",
+      "model": "gpt-5.2",
       "patternName": "explain"
     }]
   }'
@@ -232,9 +232,9 @@ List available AI models.
 ```json
 {
-  "models": ["gpt-4o", "gpt-4o-mini", "claude-sonnet-4.5", "gemini-2.0-flash-exp"],
+  "models": ["gpt-5.2", "gpt-5-mini", "claude-sonnet-4.5", "gemini-2.0-flash-exp"],
   "vendors": {
-    "openai": ["gpt-4o", "gpt-4o-mini"],
+    "openai": ["gpt-5.2", "gpt-5-mini"],
     "anthropic": ["claude-sonnet-4.5", "claude-opus-4.5"],
     "gemini": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp"]
   }
 }
@@ -359,7 +359,7 @@ curl -X POST http://localhost:8080/chat \
     \"prompts\": [{
       \"userInput\": \"$TRANSCRIPT\",
       \"vendor\": \"openai\",
-      \"model\": \"gpt-4o\",
+      \"model\": \"gpt-5.2\",
       \"patternName\": \"youtube_summary\"
     }]
   }"
@@ -374,7 +374,7 @@ curl -s -X POST http://localhost:8080/youtube/transcript \
   jq -r '.transcript' | \
   xargs -I {} curl -X POST http://localhost:8080/chat \
     -H "Content-Type: application/json" \
-    -d "{\"prompts\":[{\"userInput\":\"{}\",\"vendor\":\"openai\",\"model\":\"gpt-4o\",\"patternName\":\"youtube_summary\"}]}"
+    -d "{\"prompts\":[{\"userInput\":\"{}\",\"vendor\":\"openai\",\"model\":\"gpt-5.2\",\"patternName\":\"youtube_summary\"}]}"
 ```
 
 #### Alternative: Using a script
@@ -398,7 +398,7 @@ curl -X POST "$API_BASE/chat" \
     \"prompts\": [{
       \"userInput\": $(echo "$TRANSCRIPT" | jq -Rs .),
       \"vendor\": \"openai\",
-      \"model\": \"gpt-4o\",
+      \"model\": \"gpt-5.2\",
       \"patternName\": \"youtube_summary\"
     }]
   }"
diff --git a/internal/plugins/ai/openai/openai.go b/internal/plugins/ai/openai/openai.go
index 743aaaee..85d848dc 100644
--- a/internal/plugins/ai/openai/openai.go
+++ b/internal/plugins/ai/openai/openai.go
@@ -75,7 +75,7 @@ func (o *Client) SetResponsesAPIEnabled(enabled bool) {
 // checkImageGenerationCompatibility warns if the model doesn't support image generation
 func checkImageGenerationCompatibility(model string) {
 	if !supportsImageGeneration(model) {
-		fmt.Fprintf(os.Stderr, "Warning: Model '%s' does not support image generation. Supported models: %s. Consider using -m gpt-4o for image generation.\n",
+		fmt.Fprintf(os.Stderr, "Warning: Model '%s' does not support image generation. Supported models: %s. Consider using -m gpt-5.2 for image generation.\n",
 			model, strings.Join(ImageGenerationSupportedModels, ", "))
 	}
 }
diff --git a/internal/plugins/ai/openai/openai_image_test.go b/internal/plugins/ai/openai/openai_image_test.go
index f5251746..f5a102ef 100644
--- a/internal/plugins/ai/openai/openai_image_test.go
+++ b/internal/plugins/ai/openai/openai_image_test.go
@@ -521,7 +521,7 @@ func TestCheckImageGenerationCompatibility(t *testing.T) {
 				assert.NotEmpty(t, output, "Expected warning output for unsupported model")
 				assert.Contains(t, output, tt.expectedText, "Warning message should contain model name")
 				assert.Contains(t, output, "Supported models:", "Warning should mention supported models")
-				assert.Contains(t, output, "gpt-4o", "Warning should suggest gpt-4o")
+				assert.Contains(t, output, "gpt-5.2", "Warning should suggest gpt-5.2")
 			} else {
 				assert.Empty(t, output, "No warning expected for supported model")
 			}
diff --git a/internal/plugins/ai/openai_compatible/providers_config.go b/internal/plugins/ai/openai_compatible/providers_config.go
index f3712c23..fdbb7df3 100644
--- a/internal/plugins/ai/openai_compatible/providers_config.go
+++ b/internal/plugins/ai/openai_compatible/providers_config.go
@@ -194,6 +194,8 @@ func (c *Client) getStaticModels(modelsKey string) ([]string, error) {
 		}, nil
 	case "static:minimax":
 		return []string{
+			"MiniMax-M2.5",
+			"MiniMax-M2.5-lightning",
 			"MiniMax-M2",
 			"MiniMax-M2.1",
 			"MiniMax-M2.1-lightning",
diff --git a/internal/plugins/template/Examples/openai-chat.sh b/internal/plugins/template/Examples/openai-chat.sh
index e8d65ee8..60d8f3a5 100755
--- a/internal/plugins/template/Examples/openai-chat.sh
+++ b/internal/plugins/template/Examples/openai-chat.sh
@@ -6,7 +6,7 @@ RESPONSE=$(curl "$OPENAI_API_BASE_URL/chat/completions" \
   -s -w "\n%{http_code}" \
   -H "Content-Type: application/json" \
   -H "Authorization: Bearer $OPENAI_API_KEY" \
-  -d "{\"model\":\"gpt-4o-mini\",\"messages\":[{\"role\":\"user\",\"content\":$INPUT}]}")
+  -d "{\"model\":\"gpt-5-mini\",\"messages\":[{\"role\":\"user\",\"content\":$INPUT}]}")
 
 HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
 BODY=$(echo "$RESPONSE" | sed '$d')
diff --git a/internal/server/docs/API_VARIABLES_EXAMPLE.md b/internal/server/docs/API_VARIABLES_EXAMPLE.md
index 6f61edae..8d6cf5e9 100644
--- a/internal/server/docs/API_VARIABLES_EXAMPLE.md
+++ b/internal/server/docs/API_VARIABLES_EXAMPLE.md
@@ -12,7 +12,7 @@ This example demonstrates how to use pattern variables in REST API calls to the
     {
       "userInput": "Hello my name is Kayvan",
      "patternName": "translate",
-      "model": "gpt-4o",
+      "model": "gpt-5.2",
       "vendor": "openai",
       "contextName": "",
       "strategyName": "",
@@ -64,7 +64,7 @@ curl -X POST http://localhost:8080/api/chat \
       {
         "userInput": "Hello my name is Kayvan",
         "patternName": "translate",
-        "model": "gpt-4o",
+        "model": "gpt-5.2",
         "vendor": "openai",
         "variables": {
           "lang_code": "fr"
@@ -85,7 +85,7 @@ For patterns that use multiple variables:
     {
       "userInput": "Analyze this business model",
       "patternName": "custom_analysis",
-      "model": "gpt-4o",
+      "model": "gpt-5.2",
       "variables": {
         "role": "expert consultant",
         "experience": "15",

From 191edb56ca559588f36ea76bca831fb6a92ebf26 Mon Sep 17 00:00:00 2001
From: Kayvan Sylvan
Date: Thu, 12 Feb 2026 16:56:58 -0800
Subject: [PATCH 2/4] chore: remove deprecated GPT-4 models from image generation supported list

- Remove `gpt-4o` from image generation supported models
- Remove `gpt-4o-mini` from image generation supported models
- Remove `gpt-4.1` from image generation supported models
- Remove `gpt-4.1-mini` from image generation supported models
- Keep `gpt-4.1-nano`, `o3`, and `gpt-5` as supported models
---
 internal/plugins/ai/openai/openai_image.go | 4 ----
 1 file changed, 4 deletions(-)

diff --git a/internal/plugins/ai/openai/openai_image.go b/internal/plugins/ai/openai/openai_image.go
index 9b4bc968..697e8817 100644
--- a/internal/plugins/ai/openai/openai_image.go
+++ b/internal/plugins/ai/openai/openai_image.go
@@ -22,10 +22,6 @@ const ImageGenerationToolType = "image_generation"
 
 // ImageGenerationSupportedModels lists all models that support image generation
 var ImageGenerationSupportedModels = []string{
-	"gpt-4o",
-	"gpt-4o-mini",
-	"gpt-4.1",
-	"gpt-4.1-mini",
 	"gpt-4.1-nano",
 	"o3",
 	"gpt-5",

From 9a593cbc5a1ae9dfb735bb7b63f579ced651b99f Mon Sep 17 00:00:00 2001
From: Kayvan Sylvan
Date: Thu, 12 Feb 2026 17:04:25 -0800
Subject: Remove deprecated OpenAI models from image generation support list

- Remove gpt-4o, gpt-4o-mini, gpt-4.1, and gpt-4.1-mini from ImageGenerationSupportedModels
- These models are being retired by OpenAI effective February 13, 2026
- Keep gpt-4.1-nano, o3, gpt-5, gpt-5-nano, gpt-5.2 as supported
- Update tests to use gpt-5.2 instead of gpt-4o as the example supported model
- Update test expectations to reflect deprecated models no longer being supported

This ensures users get accurate warnings when attempting image generation
with retired models, while maintaining support for currently available models.

Co-Authored-By: Warp
---
 .../plugins/ai/openai/openai_image_test.go | 22 +++++++++----------
 1 file changed, 11 insertions(+), 11 deletions(-)

diff --git a/internal/plugins/ai/openai/openai_image_test.go b/internal/plugins/ai/openai/openai_image_test.go
index f5a102ef..9e581b60 100644
--- a/internal/plugins/ai/openai/openai_image_test.go
+++ b/internal/plugins/ai/openai/openai_image_test.go
@@ -231,24 +231,24 @@ func TestSupportsImageGeneration(t *testing.T) {
 		expected bool
 	}{
 		{
-			name:     "gpt-4o supports image generation",
+			name:     "gpt-4o does not support image generation (deprecated)",
 			model:    "gpt-4o",
-			expected: true,
+			expected: false,
 		},
 		{
-			name:     "gpt-4o-mini supports image generation",
+			name:     "gpt-4o-mini does not support image generation (deprecated)",
 			model:    "gpt-4o-mini",
-			expected: true,
+			expected: false,
 		},
 		{
-			name:     "gpt-4.1 supports image generation",
+			name:     "gpt-4.1 does not support image generation (deprecated)",
 			model:    "gpt-4.1",
-			expected: true,
+			expected: false,
 		},
 		{
-			name:     "gpt-4.1-mini supports image generation",
+			name:     "gpt-4.1-mini does not support image generation (deprecated)",
 			model:    "gpt-4.1-mini",
-			expected: true,
+			expected: false,
 		},
 		{
 			name:     "gpt-4.1-nano supports image generation",
@@ -336,7 +336,7 @@ func TestModelValidationLogic(t *testing.T) {
 
 	t.Run("Supported model with image file should not trigger validation", func(t *testing.T) {
 		opts := &domain.ChatOptions{
-			Model:     "gpt-4o",
+			Model:     "gpt-5.2",
 			ImageFile: "/tmp/output.png",
 		}
 
@@ -475,7 +475,7 @@ func TestCheckImageGenerationCompatibility(t *testing.T) {
 	}{
 		{
 			name:          "Supported model - no warning",
-			model:         "gpt-4o",
+			model:         "gpt-5.2",
 			expectWarning: false,
 		},
 		{
@@ -557,7 +557,7 @@ func TestSendResponses_WithWarningIntegration(t *testing.T) {
 		},
 		{
 			name:          "Supported model with image - no warning, no error",
-			model:         "gpt-4o",
+			model:         "gpt-5.2",
 			imageFile:     "test.png",
 			expectWarning: false,
 			expectError:   false,

From dced6ecb7ae5827c31e7c7a866881fac6db32fde Mon Sep 17 00:00:00 2001
From: Kayvan Sylvan
Date: Thu, 12 Feb 2026 17:24:17 -0800
Subject: [PATCH 4/4] chore: incoming 1995 changelog entry
---
 cmd/generate_changelog/incoming/1995.txt | 7 +++++++
 1 file changed, 7 insertions(+)
 create mode 100644 cmd/generate_changelog/incoming/1995.txt

diff --git a/cmd/generate_changelog/incoming/1995.txt b/cmd/generate_changelog/incoming/1995.txt
new file mode 100644
index 00000000..4d44e59b
--- /dev/null
+++ b/cmd/generate_changelog/incoming/1995.txt
@@ -0,0 +1,7 @@
+### PR [#1995](https://github.com/danielmiessler/Fabric/pull/1995) by [ksylvan](https://github.com/ksylvan): OpenAI gpt-4o, GPT-4 deprecations, plus other model list updates
+
+- Update default summarize model to `claude-sonnet-4-5`
+- Replace `gpt-4o` and `gpt-4o-mini` references with `gpt-5.2` and `gpt-5-mini` throughout documentation and code
+- Remove deprecated GPT-4 models (`gpt-4o`, `gpt-4o-mini`, `gpt-4.1`, `gpt-4.1-mini`) from image generation supported list, effective February 13, 2026
+- Add MiniMax-M2.5 and M2.5-lightning to static models list
+- Update tests to use `gpt-5.2` instead of `gpt-4o` as the example supported model