diff --git a/cmd/generate_changelog/README.md b/cmd/generate_changelog/README.md
index 816ad678..c7bc82ab 100644
--- a/cmd/generate_changelog/README.md
+++ b/cmd/generate_changelog/README.md
@@ -246,7 +246,7 @@ Set the model via environment variable:
 ```bash
 export FABRIC_CHANGELOG_SUMMARIZE_MODEL=claude-opus-4
 # or
-export FABRIC_CHANGELOG_SUMMARIZE_MODEL=gpt-4
+export FABRIC_CHANGELOG_SUMMARIZE_MODEL=gpt-5.2
 ```
 
 AI summaries are cached and only regenerated when:
diff --git a/cmd/generate_changelog/internal/changelog/summarize.go b/cmd/generate_changelog/internal/changelog/summarize.go
index ac5ccc85..e1ada377 100644
--- a/cmd/generate_changelog/internal/changelog/summarize.go
+++ b/cmd/generate_changelog/internal/changelog/summarize.go
@@ -7,7 +7,7 @@ import (
 	"strings"
 )
 
-const DefaultSummarizeModel = "claude-sonnet-4-20250514"
+const DefaultSummarizeModel = "claude-sonnet-4-5"
 const MinContentLength = 256 // Minimum content length to consider for summarization
 
 const prompt = `# ROLE
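For context on how the new default interacts with the README's environment variable: a minimal sketch of the assumed resolution order, where `FABRIC_CHANGELOG_SUMMARIZE_MODEL` takes precedence over `DefaultSummarizeModel`. The helper name `resolveSummarizeModel` is illustrative, not from the codebase.

```go
package changelog

import "os"

const DefaultSummarizeModel = "claude-sonnet-4-5"

// resolveSummarizeModel is a hypothetical helper showing the assumed
// precedence: the FABRIC_CHANGELOG_SUMMARIZE_MODEL environment variable
// wins when set, otherwise the new default applies.
func resolveSummarizeModel() string {
	if m := os.Getenv("FABRIC_CHANGELOG_SUMMARIZE_MODEL"); m != "" {
		return m
	}
	return DefaultSummarizeModel
}
```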
diff --git a/docs/notification-config.yaml b/docs/notification-config.yaml
index a81a0a8c..21557c9e 100644
--- a/docs/notification-config.yaml
+++ b/docs/notification-config.yaml
@@ -16,6 +16,6 @@ notification: true
 # notificationCommand: '/path/to/custom-notification-script.sh "$1" "$2"'
 
 # Other common settings
-model: "gpt-4o"
+model: "gpt-5.2"
 temperature: 0.7
 stream: true
diff --git a/docs/rest-api.md b/docs/rest-api.md
index b6418475..876208de 100644
--- a/docs/rest-api.md
+++ b/docs/rest-api.md
@@ -81,7 +81,7 @@ Stream AI responses using Server-Sent Events (SSE).
 {
   "userInput": "Explain quantum computing",
   "vendor": "openai",
-  "model": "gpt-4o",
+  "model": "gpt-5.2",
   "patternName": "explain",
   "contextName": "",
   "strategyName": "",
@@ -103,7 +103,7 @@ Stream AI responses using Server-Sent Events (SSE).
 | ------- | ---------- | --------- | ------------- |
 | `userInput` | **Yes** | - | Your message or question |
 | `vendor` | **Yes** | - | AI provider: `openai`, `anthropic`, `gemini`, `ollama`, etc. |
-| `model` | **Yes** | - | Model name: `gpt-4o`, `claude-sonnet-4.5`, `gemini-2.0-flash-exp`, etc. |
+| `model` | **Yes** | - | Model name: `gpt-5.2`, `claude-sonnet-4.5`, `gemini-2.0-flash-exp`, etc. |
 | `patternName` | No | `""` | Pattern to apply (from `~/.config/fabric/patterns/`) |
 | `contextName` | No | `""` | Context to prepend (from `~/.config/fabric/contexts/`) |
 | `strategyName` | No | `""` | Strategy to use (from `~/.config/fabric/strategies/`) |
@@ -151,7 +151,7 @@ curl -X POST http://localhost:8080/chat \
     "prompts": [{
       "userInput": "What is Fabric?",
       "vendor": "openai",
-      "model": "gpt-4o",
+      "model": "gpt-5.2",
       "patternName": "explain"
     }]
   }'
@@ -232,9 +232,9 @@ List available AI models.
 
 ```json
 {
-  "models": ["gpt-4o", "gpt-4o-mini", "claude-sonnet-4.5", "gemini-2.0-flash-exp"],
+  "models": ["gpt-5.2", "gpt-5-mini", "claude-sonnet-4.5", "gemini-2.0-flash-exp"],
   "vendors": {
-    "openai": ["gpt-4o", "gpt-4o-mini"],
+    "openai": ["gpt-5.2", "gpt-5-mini"],
     "anthropic": ["claude-sonnet-4.5", "claude-opus-4.5"],
     "gemini": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp"]
   }
@@ -359,7 +359,7 @@ curl -X POST http://localhost:8080/chat \
     \"prompts\": [{
       \"userInput\": \"$TRANSCRIPT\",
       \"vendor\": \"openai\",
-      \"model\": \"gpt-4o\",
+      \"model\": \"gpt-5.2\",
       \"patternName\": \"youtube_summary\"
     }]
   }"
@@ -374,7 +374,7 @@ curl -s -X POST http://localhost:8080/youtube/transcript \
   jq -r '.transcript' | \
   xargs -I {} curl -X POST http://localhost:8080/chat \
     -H "Content-Type: application/json" \
-    -d "{\"prompts\":[{\"userInput\":\"{}\",\"vendor\":\"openai\",\"model\":\"gpt-4o\",\"patternName\":\"youtube_summary\"}]}"
+    -d "{\"prompts\":[{\"userInput\":\"{}\",\"vendor\":\"openai\",\"model\":\"gpt-5.2\",\"patternName\":\"youtube_summary\"}]}"
 ```
 
 #### Alternative: Using a script
@@ -398,7 +398,7 @@ curl -X POST "$API_BASE/chat" \
     \"prompts\": [{
       \"userInput\": $(echo "$TRANSCRIPT" | jq -Rs .),
       \"vendor\": \"openai\",
-      \"model\": \"gpt-4o\",
+      \"model\": \"gpt-5.2\",
       \"patternName\": \"youtube_summary\"
     }]
   }"
diff --git a/internal/plugins/ai/openai/openai.go b/internal/plugins/ai/openai/openai.go
index 743aaaee..85d848dc 100644
--- a/internal/plugins/ai/openai/openai.go
+++ b/internal/plugins/ai/openai/openai.go
@@ -75,7 +75,7 @@ func (o *Client) SetResponsesAPIEnabled(enabled bool) {
 // checkImageGenerationCompatibility warns if the model doesn't support image generation
 func checkImageGenerationCompatibility(model string) {
 	if !supportsImageGeneration(model) {
-		fmt.Fprintf(os.Stderr, "Warning: Model '%s' does not support image generation. Supported models: %s. Consider using -m gpt-4o for image generation.\n",
+		fmt.Fprintf(os.Stderr, "Warning: Model '%s' does not support image generation. Supported models: %s. Consider using -m gpt-5.2 for image generation.\n",
 			model, strings.Join(ImageGenerationSupportedModels, ", "))
 	}
 }
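The warning updated above is driven by a simple membership check. A self-contained sketch of that pattern follows; `supportsImageGeneration` and `ImageGenerationSupportedModels` exist in the openai plugin, but the list entries and the standalone `main` here are placeholders for illustration.

```go
package main

import (
	"fmt"
	"os"
	"slices"
	"strings"
)

// Placeholder list; the real ImageGenerationSupportedModels lives in the
// openai plugin and may contain different entries.
var imageGenerationSupportedModels = []string{"gpt-5.2", "gpt-image-1"}

// supportsImageGeneration reports whether the model is in the supported list.
func supportsImageGeneration(model string) bool {
	return slices.Contains(imageGenerationSupportedModels, model)
}

func main() {
	model := "o3-mini"
	if !supportsImageGeneration(model) {
		fmt.Fprintf(os.Stderr, "Warning: Model '%s' does not support image generation. Supported models: %s. Consider using -m gpt-5.2 for image generation.\n",
			model, strings.Join(imageGenerationSupportedModels, ", "))
	}
}
```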
diff --git a/internal/plugins/ai/openai/openai_image_test.go b/internal/plugins/ai/openai/openai_image_test.go
index f5251746..f5a102ef 100644
--- a/internal/plugins/ai/openai/openai_image_test.go
+++ b/internal/plugins/ai/openai/openai_image_test.go
@@ -521,7 +521,7 @@ func TestCheckImageGenerationCompatibility(t *testing.T) {
 			assert.NotEmpty(t, output, "Expected warning output for unsupported model")
 			assert.Contains(t, output, tt.expectedText, "Warning message should contain model name")
 			assert.Contains(t, output, "Supported models:", "Warning should mention supported models")
-			assert.Contains(t, output, "gpt-4o", "Warning should suggest gpt-4o")
+			assert.Contains(t, output, "gpt-5.2", "Warning should suggest gpt-5.2")
 		} else {
 			assert.Empty(t, output, "No warning expected for supported model")
 		}
diff --git a/internal/plugins/ai/openai_compatible/providers_config.go b/internal/plugins/ai/openai_compatible/providers_config.go
index f3712c23..fdbb7df3 100644
--- a/internal/plugins/ai/openai_compatible/providers_config.go
+++ b/internal/plugins/ai/openai_compatible/providers_config.go
@@ -194,6 +194,8 @@ func (c *Client) getStaticModels(modelsKey string) ([]string, error) {
 		}, nil
 	case "static:minimax":
 		return []string{
+			"MiniMax-M2.5",
+			"MiniMax-M2.5-lightning",
 			"MiniMax-M2",
 			"MiniMax-M2.1",
 			"MiniMax-M2.1-lightning",
diff --git a/internal/plugins/template/Examples/openai-chat.sh b/internal/plugins/template/Examples/openai-chat.sh
index e8d65ee8..60d8f3a5 100755
--- a/internal/plugins/template/Examples/openai-chat.sh
+++ b/internal/plugins/template/Examples/openai-chat.sh
@@ -6,7 +6,7 @@
 RESPONSE=$(curl "$OPENAI_API_BASE_URL/chat/completions" \
   -s -w "\n%{http_code}" \
   -H "Content-Type: application/json" \
   -H "Authorization: Bearer $OPENAI_API_KEY" \
-  -d "{\"model\":\"gpt-4o-mini\",\"messages\":[{\"role\":\"user\",\"content\":$INPUT}]}")
+  -d "{\"model\":\"gpt-5-mini\",\"messages\":[{\"role\":\"user\",\"content\":$INPUT}]}")
 HTTP_CODE=$(echo "$RESPONSE" | tail -n1)
 BODY=$(echo "$RESPONSE" | sed '$d')
diff --git a/internal/server/docs/API_VARIABLES_EXAMPLE.md b/internal/server/docs/API_VARIABLES_EXAMPLE.md
index 6f61edae..8d6cf5e9 100644
--- a/internal/server/docs/API_VARIABLES_EXAMPLE.md
+++ b/internal/server/docs/API_VARIABLES_EXAMPLE.md
@@ -12,7 +12,7 @@ This example demonstrates how to use pattern variables in REST API calls to the
 {
   "userInput": "Hello my name is Kayvan",
   "patternName": "translate",
-  "model": "gpt-4o",
+  "model": "gpt-5.2",
   "vendor": "openai",
   "contextName": "",
   "strategyName": "",
@@ -64,7 +64,7 @@ curl -X POST http://localhost:8080/api/chat \
     {
       "userInput": "Hello my name is Kayvan",
       "patternName": "translate",
-      "model": "gpt-4o",
+      "model": "gpt-5.2",
       "vendor": "openai",
       "variables": {
         "lang_code": "fr"
@@ -85,7 +85,7 @@ For patterns that use multiple variables:
 {
   "userInput": "Analyze this business model",
   "patternName": "custom_analysis",
-  "model": "gpt-4o",
+  "model": "gpt-5.2",
   "variables": {
     "role": "expert consultant",
     "experience": "15",
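To exercise the updated examples end to end, a minimal Go client that posts the request body from API_VARIABLES_EXAMPLE.md (with the `lang_code` variable and the new model name) to the `/chat` endpoint. The field names follow the documented JSON; the map layout and status-only handling are this sketch's own choices, and the real endpoint streams SSE, which this sketch does not parse.

```go
package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	// Request body mirrors the documented JSON, using the new model name.
	body, err := json.Marshal(map[string]any{
		"prompts": []map[string]any{{
			"userInput":   "Hello my name is Kayvan",
			"patternName": "translate",
			"model":       "gpt-5.2",
			"vendor":      "openai",
			"variables":   map[string]string{"lang_code": "fr"},
		}},
	})
	if err != nil {
		panic(err)
	}
	resp, err := http.Post("http://localhost:8080/chat", "application/json", bytes.NewReader(body))
	if err != nil {
		fmt.Println("request failed:", err)
		return
	}
	defer resp.Body.Close()
	// Responses arrive as Server-Sent Events; see docs/rest-api.md.
	fmt.Println("status:", resp.Status)
}
```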