Mirror of https://github.com/danielmiessler/Fabric.git, synced 2026-01-08 22:08:03 -05:00.

Compare commits

62 Commits
| SHA1 |
|---|
| 678db0c43e |
| 765977cd42 |
| 8017f376b1 |
| 6f103b2db2 |
| 19aeebe6f5 |
| 2d79d3b706 |
| 4fe501da02 |
| 2501cbf47e |
| d96a1721bb |
| c1838d3744 |
| 643a60a2cf |
| 90712506f1 |
| edc02120bb |
| 8f05883581 |
| 996933e687 |
| 8806f4c2f4 |
| b381bae24a |
| a6c753499b |
| 90b2975fba |
| 145499ee4c |
| f9359c99dc |
| 6b6d0adbfb |
| 55c94e65da |
| 2118013547 |
| 82a9f02879 |
| 602304e417 |
| c0d00aeb1f |
| 1ec8ecba24 |
| ad1465a2e5 |
| 12b6cf4a0a |
| a6ad1d77f9 |
| af3403ae44 |
| c971781072 |
| fd0ac8aa3b |
| 0991c52e6f |
| c60e8d1bf7 |
| a5ac60cedf |
| 96ce0838b5 |
| 3d88f8e2fc |
| f588af0887 |
| c4bca7a302 |
| 1ced245bfe |
| d6100026da |
| fd465d4130 |
| 0776e77872 |
| cb2759a5a1 |
| c32a650eaa |
| b41aa2dbdc |
| 21ec2ca9d9 |
| 672b920a89 |
| 8a28ca7b1e |
| 435d61ae0e |
| 6ea5551f06 |
| b04346008b |
| c7ecac3262 |
| 07457d86d3 |
| 8166ee7a18 |
| c539b1edfc |
| 66d3bf786e |
| 569f50179d |
| 477ca045b0 |
| e40d51cc71 |
@@ -94,7 +94,6 @@ jobs:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
        run: |
          go run ./cmd/generate_changelog --process-prs ${{ steps.increment_version.outputs.new_tag }}
-         go run ./cmd/generate_changelog --sync-db
          git add ./cmd/generate_changelog/changelog.db
      - name: Commit changes
        run: |
CHANGELOG.md (500 lines changed)
@@ -1,59 +1,139 @@
# Changelog

-## v1.4.366 (2025-12-31)
+## v1.4.375 (2026-01-08)

### PR [#1925](https://github.com/danielmiessler/Fabric/pull/1925) by [ksylvan](https://github.com/ksylvan): docs: update README to document new AI providers and features

- Docs: update README to document new AI providers and features
- List supported native and OpenAI-compatible AI provider integrations
- Document dry run mode for previewing prompt construction
- Explain Ollama compatibility mode for exposing API endpoints
- Detail available prompt strategies like chain-of-thought and reflexion

### PR [#1926](https://github.com/danielmiessler/Fabric/pull/1926) by [henricook](https://github.com/henricook) and [ksylvan](https://github.com/ksylvan): feat(vertexai): add dynamic model listing and multi-model support

- Dynamic model listing from Vertex AI Model Garden API
- Support for both Gemini (genai SDK) and Claude (Anthropic SDK) models
- Curated Gemini model list with web search support for Gemini models
- Thinking/extended thinking support for Gemini
- TopP parameter support for Claude models

## v1.4.374 (2026-01-05)

### PR [#1924](https://github.com/danielmiessler/Fabric/pull/1924) by [ksylvan](https://github.com/ksylvan): Rename `code_helper` to `code2context` across documentation and CLI

- Rename `code_helper` command to `code2context` throughout codebase
- Update README.md table of contents and references
- Update installation instructions with new binary name
- Update all usage examples in main.go help text
- Update create_coding_feature pattern documentation

## v1.4.373 (2026-01-04)

### PR [#1914](https://github.com/danielmiessler/Fabric/pull/1914) by [majiayu000](https://github.com/majiayu000): feat(code_helper): add stdin support for piping file lists

- Added stdin support for piping file lists to code_helper, enabling commands like `find . -name '*.go' | code_helper "instructions"` and `git ls-files '*.py' | code_helper "Add type hints"`
- Implemented automatic detection of stdin pipe mode with single argument (instructions) support
- Enhanced tool to read file paths from stdin line by line while maintaining backward compatibility with existing directory scanning functionality

### PR [#1915](https://github.com/danielmiessler/Fabric/pull/1915) by [majiayu000](https://github.com/majiayu000): feat: parallelize audio chunk transcription for improved performance

- Parallelize audio chunk transcription using goroutines for improved performance

## v1.4.372 (2026-01-04)

### PR [#1913](https://github.com/danielmiessler/Fabric/pull/1913) by [majiayu000](https://github.com/majiayu000): fix: REST API /chat endpoint doesn't pass 'search' parameter to ChatOptions

- Fix: REST API /chat endpoint now properly passes Search and SearchLocation parameters to ChatOptions

## v1.4.371 (2026-01-04)

### PR [#1923](https://github.com/danielmiessler/Fabric/pull/1923) by [ksylvan](https://github.com/ksylvan): ChangeLog Generation stability

- Fix: improve date parsing and prevent early return when PR numbers exist
- Add SQLite datetime formats to version date parsing logic
- Loop through multiple date formats until one succeeds
- Include SQLite fractional seconds format support
- Prevent early return when version has PR numbers to output

## v1.4.370 (2026-01-04)

### PR [#1921](https://github.com/danielmiessler/Fabric/pull/1921) by [ksylvan](https://github.com/ksylvan): chore: remove redundant `--sync-db` step from changelog workflow

- Remove redundant `--sync-db` step from changelog workflow
- Remove duplicate database sync command from version workflow
- Simplify changelog generation to single process-prs step
- Clean up `heal_person` pattern by removing duplicate content sections
- Remove duplicate IDENTITY, PURPOSE, STEPS, and OUTPUT INSTRUCTIONS from pattern file

## v1.4.369 (2026-01-04)

### PR [#1919](https://github.com/danielmiessler/Fabric/pull/1919) by [ksylvan](https://github.com/ksylvan): Fix the `last_pr_sync` setting during PR incoming processing

- Fix: update `SetLastPRSync` to use version date instead of current time
- Change last_pr_sync to use versionDate instead of time.Now()
- Ensure future runs fetch PRs merged after the version date
- Add clarifying comments explaining the sync timing logic

## v1.4.368 (2026-01-04)

### PR [#1918](https://github.com/danielmiessler/Fabric/pull/1918) by [ksylvan](https://github.com/ksylvan): Maintenance: Fix ChangeLog Generation during CI/CD

- Refactor CHANGELOG.md entries with improved formatting and conventional commit prefixes
- Consolidate git worktree fixes into single PR #1917 entry
- Reorder PR entries chronologically within version sections
- Add cache metadata update step before staging release changes
- Update changelog database binary with new entry formatting

## v1.4.367 (2026-01-03)

### PR [#1912](https://github.com/danielmiessler/Fabric/pull/1912) by [berniegreen](https://github.com/berniegreen): refactor: implement structured streaming and metadata support

- Feat: add domain types for structured streaming (Phase 1)
- Refactor: update Vendor interface and Chatter for structured streaming (Phase 2)
- Refactor: implement structured streaming in all AI vendors (Phase 3)
- Feat: implement CLI support for metadata display (Phase 4)
- Feat: implement REST API support for metadata streaming (Phase 5)

## v1.4.366 (2026-01-03)

### PR [#1917](https://github.com/danielmiessler/Fabric/pull/1917) by [ksylvan](https://github.com/ksylvan): Fix: generate_changelog now works in Git Work Trees

- Fix: improve git worktree status detection to ignore staged-only files and check worktree status codes instead of using IsClean method
- Fix: use native git CLI for add/commit in worktrees to resolve go-git issues with shared object databases
- Check filesystem existence of staged files to handle worktree scenarios and ignore files staged in main repo that don't exist in worktree
- Update GetStatusDetails to only include worktree-modified files and ignore unmodified and untracked files in clean check
- Allow staged files that exist in worktree to be committed normally and fix 'cannot create empty commit: clean working tree' errors

### PR [#1909](https://github.com/danielmiessler/Fabric/pull/1909) by [copyleftdev](https://github.com/copyleftdev): feat: add greybeard_secure_prompt_engineer pattern

- Added greybeard_secure_prompt_engineer pattern
- Updated changelog with incoming entry
- Feat: add greybeard_secure_prompt_engineer pattern

### Direct commits

- Fix: use native git CLI for add/commit in worktrees

  go-git has issues with worktrees where the object database isn't properly shared, causing 'invalid object' errors when trying to commit. Switching to native git CLI for add and commit operations resolves this.

  This fixes generate_changelog failing in worktrees with errors like:

  - 'cannot create empty commit: clean working tree'
  - 'error: invalid object ... Error building trees'

  Co-Authored-By: Warp <agent@warp.dev>

- Fix: IsWorkingDirectoryClean to work correctly in worktrees

  - Check filesystem existence of staged files to handle worktree scenarios
  - Ignore files staged in main repo that don't exist in worktree
  - Allow staged files that exist in worktree to be committed normally

  Co-Authored-By: Warp <agent@warp.dev>

- Fix: improve git worktree status detection to ignore staged-only files

  - Add worktree-specific check for actual working directory changes
  - Filter out files that are only staged but not in worktree
  - Check worktree status codes instead of using IsClean method
  - Update GetStatusDetails to only include worktree-modified files
  - Ignore unmodified and untracked files in clean check

- Feat: implement REST API support for metadata streaming (Phase 5)
- Feat: implement CLI support for metadata display (Phase 4)
- Refactor: implement structured streaming in all AI vendors (Phase 3)

## v1.4.365 (2025-12-30)

### PR [#1908](https://github.com/danielmiessler/Fabric/pull/1908) by [rodaddy](https://github.com/rodaddy): feat(ai): add VertexAI provider for Claude models

- Added support for Google Cloud Vertex AI as a provider to access Claude models using Application Default Credentials (ADC)
- Enabled routing of Fabric requests through Google Cloud Platform instead of directly to Anthropic for GCP billing
- Implemented support for Claude models (Sonnet 4.5, Opus 4.5, Haiku 4.5, etc.) via Vertex AI
- Added Google ADC authentication support eliminating the need for API keys
- Configured project ID and region settings with 'global' as default for cost optimization
- Add support for Google Cloud Vertex AI as a provider to access Claude models using Application Default Credentials (ADC)
- Enable routing of Fabric requests through Google Cloud Platform instead of directly to Anthropic for GCP billing
- Support for Claude models (Sonnet 4.5, Opus 4.5, Haiku 4.5, etc.) via Vertex AI with configurable project ID and region
- Implement full streaming and non-streaming request capabilities with complete ai.Vendor interface
- Extract message conversion logic to dedicated `toMessages` helper method with proper role handling and validation

## v1.4.364 (2025-12-28)

### PR [#1907](https://github.com/danielmiessler/Fabric/pull/1907) by [majiayu000](https://github.com/majiayu000): feat(gui): add Session Name support for multi-turn conversations

- Added Session Name support for multi-turn conversations in GUI chat interface, enabling persistent conversations similar to CLI's --session flag
- Added SessionName field to PromptRequest and sessionName to ChatPrompt interface for proper session handling
- Extracted SessionSelector component with Select component instead of native dropdown for better user experience
- Implemented session message loading when selecting existing sessions with proper error handling
- Fixed Select component binding and empty input handling to prevent redundant API calls and properly clear sessions
- Add Session Name support for multi-turn conversations in GUI chat interface, enabling persistent conversations similar to CLI's --session flag
- Extract session UI into dedicated SessionSelector component with proper Select component integration
- Add session message loading functionality when selecting existing sessions
- Fix session input handling to prevent resetting on each keystroke and improve layout with vertical stacking
- Implement proper error handling for session loading and two-way binding with Select component

## v1.4.363 (2025-12-25)
@@ -69,27 +149,31 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1904](https://github.com/danielmiessler/Fabric/pull/1904) by [majiayu000](https://github.com/majiayu000): fix: resolve WebUI tooltips not rendering due to overflow clipping

- Fix: resolve WebUI tooltips not rendering due to overflow clipping by using position: fixed and getBoundingClientRect() to calculate tooltip position dynamically, preventing tooltips from being clipped by parent containers with overflow: hidden
- Refactor: extract tooltip positioning logic into separate positioning.ts module for better code organization and maintainability
- Improve accessibility with aria-describedby attributes and unique IDs for better screen reader support
- Add reactive tooltip position updates on scroll and resize events for dynamic positioning
- Add SSR safety with isBrowser flag check and comprehensive unit test coverage for the positioning functions
- Fix WebUI tooltips not rendering due to overflow clipping by using position: fixed and getBoundingClientRect() for dynamic positioning
- Extract positioning calculations into dedicated `positioning.ts` module for better code organization
- Add reactive tooltip position updates on scroll and resize events for improved user experience
- Improve accessibility with `aria-describedby` attributes and unique IDs for better screen reader support
- Update unit tests to use extracted functions and add test coverage for style formatting function

## v1.4.361 (2025-12-25)

### PR [#1905](https://github.com/danielmiessler/Fabric/pull/1905) by [majiayu000](https://github.com/majiayu000): fix: optimize oversized logo images reducing package size by 93%

- Optimize oversized logo images reducing package size by 93%
- Fix: optimize oversized logo images reducing package size by 93%
- Replace 42MB favicon.png with proper 64x64 PNG (4.7KB)
- Replace 42MB fabric-logo.png with static PNG from first GIF frame (387KB)
- Optimize animated GIF from 42MB to 5.4MB (half resolution, 12fps, 128 colors)
- Update docs/images/fabric-logo-gif.gif with optimized version
- Chore: incoming 1905 changelog entry

### Direct commits

- Fix: resolve WebUI tooltips not rendering due to overflow clipping

## v1.4.360 (2025-12-23)

### PR [#1903](https://github.com/danielmiessler/Fabric/pull/1903) by [ksylvan](https://github.com/ksylvan): Update project dependencies and core SDK versions

- Update project dependencies and core SDK versions
- Chore: update project dependencies and core SDK versions
- Upgrade AWS SDK v2 components to latest stable versions
- Update Ollama library to version 0.13.5 for improvements
- Bump Google API and GenAI dependencies to newer releases
@@ -100,56 +184,50 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1902](https://github.com/danielmiessler/Fabric/pull/1902) by [ksylvan](https://github.com/ksylvan): Code Cleanup and Simplification

- Chore: simplify error formatting and clean up model assignment logic
- Remove redundant fmt.Sprintf calls from error formatting logic
- Simplify model assignment to always use normalized model names
- Remove unused variadic parameter from the VendorsManager Clear method
- Chore: incoming 1902 changelog entry

## v1.4.358 (2025-12-23)

### PR [#1901](https://github.com/danielmiessler/Fabric/pull/1901) by [orbisai0security](https://github.com/orbisai0security): security fix: Ollama update: CVE-2025-63389

- Fix: resolve critical vulnerability CVE-2025-63389 (update Ollama Go library)
- Chore: incoming 1901 changelog entry
- Fix: resolve critical vulnerability CVE-2025-63389

## v1.4.357 (2025-12-22)

### PR [#1897](https://github.com/danielmiessler/Fabric/pull/1897) by [ksylvan](https://github.com/ksylvan): feat: add MiniMax provider support to OpenAI compatible plugin

- Add MiniMax provider support to OpenAI compatible plugin
- Add MiniMax provider configuration to ProviderMap
- Set MiniMax base URL to api.minimaxi.com/v1
- Configure MiniMax with ImplementsResponses as false
- Add test case for MiniMax provider validation
- Add MiniMax provider configuration to ProviderMap with base URL set to api.minimaxi.com/v1
- Configure MiniMax with ImplementsResponses as false and add test case for provider validation

### Direct commits

- Docs: add v1.4.356 release note highlighting complete i18n support
- Add v1.4.356 entry to Recent Major Features list
- Highlight full setup prompt i18n across 10 languages
- Note intelligent environment variable handling for consistency
- Add v1.4.356 release note highlighting complete internationalization support across 10 languages
- Highlight full setup prompt i18n and intelligent environment variable handling for consistency

## v1.4.356 (2025-12-22)

### PR [#1895](https://github.com/danielmiessler/Fabric/pull/1895) by [ksylvan](https://github.com/ksylvan): Localize setup process and add funding configuration

- Localize setup prompts and error messages across multiple languages
- Implement helper for localized questions with static environment keys
- Update environment variable builder to handle hyphenated plugin names
- Replace hardcoded console output with localized i18n translation strings
- Add GitHub and Buy Me a Coffee funding configuration
- Localize setup prompts and error messages across multiple languages for improved user experience
- Add GitHub and Buy Me a Coffee funding configuration to support project development
- Implement helper for localized questions with static environment keys to streamline internationalization
- Update environment variable builder to handle hyphenated plugin names properly
- Replace hardcoded console output with localized i18n translation strings throughout the application

## v1.4.355 (2025-12-20)

### PR [#1890](https://github.com/danielmiessler/Fabric/pull/1890) by [ksylvan](https://github.com/ksylvan): Bundle yt-dlp with fabric in Nix flake, introduce slim variant

- Added yt-dlp bundling with fabric package and introduced fabric-slim variant
- Renamed original fabric package to fabricSlim and created new fabric package as symlinkJoin of fabricSlim and yt-dlp
- Added fabric-slim output for the slim variant and updated default package to point to bundled fabric
- Enhanced fabric meta description to note yt-dlp inclusion and set mainProgram to fabric in bundled package
- Added wrapper for fabric binary to include PATH in execution environment
- Added bundled yt-dlp with fabric package in Nix flake configuration
- Introduced fabric-slim variant as a lightweight alternative without yt-dlp
- Renamed original fabric package to fabricSlim for better organization
- Created new fabric package as symlinkJoin of fabricSlim and yt-dlp
- Updated default package to point to the bundled fabric version with yt-dlp

## v1.4.354 (2025-12-19)
@@ -166,7 +244,8 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1887](https://github.com/danielmiessler/Fabric/pull/1887) by [bvandevliet](https://github.com/bvandevliet): feat: correct video title and added description to yt transcript api response

- Feat: correct video title (instead of id) and added description to yt transcript api response
- Updated API documentation
- Chore: incoming 1887 changelog entry

## v1.4.352 (2025-12-18)
@@ -187,9 +266,18 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1882](https://github.com/danielmiessler/Fabric/pull/1882) by [bvandevliet](https://github.com/bvandevliet): Added yt-dlp package to docker image

- Added yt-dlp package to docker image.
- Chore: incoming 1882 changelog entry

## v1.4.350 (2025-12-18)

### PR [#1884](https://github.com/danielmiessler/Fabric/pull/1884) by [ksylvan](https://github.com/ksylvan): Implement interactive Swagger API documentation and automated OpenAPI specification generation

- Add Swagger UI at `/swagger/index.html` endpoint
- Generate OpenAPI spec files (JSON and YAML)
- Document chat, patterns, and models endpoints
- Update contributing guide with Swagger annotation instructions
- Add swaggo dependencies to project

### PR [#1880](https://github.com/danielmiessler/Fabric/pull/1880) by [ksylvan](https://github.com/ksylvan): docs: add REST API server section and new endpoint reference

- Add README table-of-contents link for REST API
@@ -198,52 +286,44 @@ Co-Authored-By: Warp <agent@warp.dev>

- Describe sessions management and model listing endpoints
- Provide curl examples for key API workflows

### PR [#1884](https://github.com/danielmiessler/Fabric/pull/1884) by [ksylvan](https://github.com/ksylvan): Implement interactive Swagger API documentation and automated OpenAPI specification generation

- Add Swagger UI at `/swagger/index.html` endpoint
- Generate OpenAPI spec files (JSON and YAML)
- Document chat, patterns, and models endpoints
- Update contributing guide with Swagger annotation instructions
- Configure authentication bypass for Swagger documentation

## v1.4.349 (2025-12-16)

### PR [#1877](https://github.com/danielmiessler/Fabric/pull/1877) by [ksylvan](https://github.com/ksylvan): modernize: update GitHub Actions and modernize Go code

- Modernize GitHub Actions and Go code with latest stdlib features
- Upgrade GitHub Actions to latest versions (v6, v21) and add modernization check step
- Modernize: update GitHub Actions and modernize Go code with latest stdlib features
- Upgrade GitHub Actions to latest versions (v6, v21)
- Add modernization check step in CI workflow
- Replace strings manipulation with `strings.CutPrefix` and `strings.CutSuffix`
- Replace manual loops with `slices.Contains` for validation and use `strings.SplitSeq` for iterator-based splitting
- Replace `fmt.Sprintf` with `fmt.Appendf` for efficiency and simplify padding calculation with `max` builtin
- Replace manual loops with `slices.Contains` for validation

## v1.4.348 (2025-12-16)

### PR [#1876](https://github.com/danielmiessler/Fabric/pull/1876) by [ksylvan](https://github.com/ksylvan): modernize Go code with TypeFor and range loops

- Replace reflect.TypeOf with TypeFor generic syntax for improved type handling
- Convert traditional for loops to range-based iterations for better code readability
- Simplify reflection usage in CLI flag handling to reduce complexity
- Update test loops to use range over integers for cleaner test code
- Refactor string processing loops in template plugin to use modern Go patterns
- Replace reflect.TypeOf with TypeFor generic syntax for improved type safety
- Convert traditional for loops to range-based iterations for cleaner code
- Simplify reflection usage in CLI flag handling
- Update test loops to use range over integers
- Refactor string processing loops in template plugin

## v1.4.347 (2025-12-16)

### PR [#1875](https://github.com/danielmiessler/Fabric/pull/1875) by [ksylvan](https://github.com/ksylvan): modernize: update benchmarks to use b.Loop and refactor map copying

- Updated benchmark loops to use cleaner `b.Loop()` syntax
- Removed unnecessary `b.ResetTimer()` call in token benchmark
- Used `maps.Copy` for merging variables in patterns handler
- Update benchmark loops to use cleaner `b.Loop()` syntax
- Remove unnecessary `b.ResetTimer()` call in token benchmark
- Use `maps.Copy` for merging variables in patterns handler
- Update benchmarks to use b.Loop and refactor map copying

## v1.4.346 (2025-12-16)

### PR [#1874](https://github.com/danielmiessler/Fabric/pull/1874) by [ksylvan](https://github.com/ksylvan): refactor: replace interface{} with any across codebase

- Part 1 of dealing with #1873 as pointed out by @philoserf
- Replace `interface{}` with `any` in slice type declarations throughout the codebase
- Update map types from `map[string]interface{}` to `map[string]any` for modern Go standards
- Replace `interface{}` with `any` in slice type declarations
- Update map types from `map[string]interface{}` to `map[string]any`
- Change variadic function parameters to use `...any` instead of `...interface{}`
- Modernize JSON unmarshaling variables to use `any` for consistency
- Update struct fields and method signatures to prefer the `any` alias over legacy interface syntax
- Modernize JSON unmarshaling variables to `any` for consistency
- Update struct fields and method signatures to prefer `any` alias

## v1.4.345 (2025-12-15)
@@ -261,12 +341,18 @@ Co-Authored-By: Warp <agent@warp.dev>

- Chore: update flake
- Merge branch 'main' into update-flake
- Chore: incoming 1867 changelog entry

## v1.4.343 (2025-12-14)

-### PR [#1829](https://github.com/danielmiessler/Fabric/pull/1829) by [dependabo](https://github.com/apps/dependabot): chore(deps): bump js-yaml from 4.1.0 to 4.1.1 in /web in the npm_and_yarn group across 1 directory
+### PR [#1829](https://github.com/danielmiessler/Fabric/pull/1829) by [dependabot[bot]](https://github.com/apps/dependabot): chore(deps): bump js-yaml from 4.1.0 to 4.1.1 in /web in the npm_and_yarn group across 1 directory

-- Updated js-yaml dependency from version 4.1.0 to 4.1.1 in the /web directory
+- Updated js-yaml dependency from version 4.1.0 to 4.1.1 in the web directory
- Added changelog entry for incoming PR #1829

### Direct commits

- Updated flake configuration

## v1.4.342 (2025-12-13)
@@ -278,7 +364,7 @@ Co-Authored-By: Warp <agent@warp.dev>

- Add os import to support stderr error writes
- Preserve help-output suppression and exit behavior

-## v1.4.341 (2025-12-10)
+## v1.4.341 (2025-12-11)

### PR [#1860](https://github.com/danielmiessler/Fabric/pull/1860) by [ksylvan](https://github.com/ksylvan): fix: allow resetting required settings without validation errors
@@ -292,19 +378,19 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1856](https://github.com/danielmiessler/Fabric/pull/1856) by [ksylvan](https://github.com/ksylvan): Add support for new ClaudeHaiku 4.5 models

- Add support for new ClaudeHaiku models in client
- Add `ModelClaudeHaiku4_5` to supported models
- Add `ModelClaudeHaiku4_5_20251001` to supported models
- Added support for new ClaudeHaiku 4.5 models in client
- Added `ModelClaudeHaiku4_5` to supported models list
- Added `ModelClaudeHaiku4_5_20251001` to supported models list

## v1.4.339 (2025-12-08)

### PR [#1855](https://github.com/danielmiessler/Fabric/pull/1855) by [ksylvan](https://github.com/ksylvan): feat: add image attachment support for Ollama vision models

- Add multi-modal image support to Ollama client
- Add base64 and io imports for image handling
- Store httpClient separately in Client struct for reuse
- Convert createChatRequest to return error for validation
- Implement convertMessage to handle multi-content chat messages
- Add loadImageBytes to fetch images from URLs
- Support base64 data URLs for inline images
- Handle HTTP image URLs with context propagation

## v1.4.338 (2025-12-04)
@@ -331,21 +417,17 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1848](https://github.com/danielmiessler/Fabric/pull/1848) by [zeddy303](https://github.com/zeddy303): Fix localStorage SSR error in favorites-store

- Fix localStorage SSR error in favorites-store by using SvelteKit's browser constant instead of typeof localStorage check to properly handle server-side rendering and prevent 'localStorage.getItem is not a function' error when running dev server
- Add changelog entry for incoming PR #1848

## v1.4.335 (2025-11-28)

### PR [#1847](https://github.com/danielmiessler/Fabric/pull/1847) by [ksylvan](https://github.com/ksylvan): Improve model name matching for NeedsRaw in Ollama plugin

- Improved model name matching in Ollama plugin by replacing prefix-based matching with substring matching
- Enhanced NeedsRaw functionality to support more flexible model name detection
- Improved model name matching in Ollama plugin by replacing prefix matching with substring matching
- Enhanced Ollama model name detection by enabling substring-based search instead of prefix-only matching
- Added "conceptmap" to VSCode dictionary settings for better development experience
- Fixed typo in README documentation
- Renamed `ollamaPrefixes` variable to `ollamaSearchStrings` for better code clarity
- Replaced `HasPrefix` function with `Contains` for more comprehensive model matching
- Added "conceptmap" to VSCode dictionary settings

### Direct commits

- Merge branch 'danielmiessler:main' into main
- Docs: Fix typo in README

## v1.4.334 (2025-11-26)
@@ -359,10 +441,6 @@ Co-Authored-By: Warp <agent@warp.dev>

## v1.4.333 (2025-11-25)

### PR [#1833](https://github.com/danielmiessler/Fabric/pull/1833) by [junaid18183](https://github.com/junaid18183): Added concall_summary

- Added concall_summery pattern to extract strategic insights from earnings transcripts for investors.

### PR [#1844](https://github.com/danielmiessler/Fabric/pull/1844) by [ksylvan](https://github.com/ksylvan): Correct directory name from `concall_summery` to `concall_summary`

- Fix: correct directory name from `concall_summery` to `concall_summary`
@@ -371,6 +449,10 @@ Co-Authored-By: Warp <agent@warp.dev>

- Add concall_summary to BUSINESS and SUMMARIZE category listings
- Add user documentation for earnings call analysis

### PR [#1833](https://github.com/danielmiessler/Fabric/pull/1833) by [junaid18183](https://github.com/junaid18183): Added concall_summery

- Added concall_summery

## v1.4.332 (2025-11-24)

### PR [#1843](https://github.com/danielmiessler/Fabric/pull/1843) by [ksylvan](https://github.com/ksylvan): Implement case-insensitive vendor and model name matching
@@ -381,11 +463,11 @@ Co-Authored-By: Warp <agent@warp.dev>

- Add FilterByVendor method with case-insensitive matching
- Add FindModelNameCaseInsensitive helper for model queries

-## v1.4.331 (2025-11-22)
+## v1.4.331 (2025-11-23)

### PR [#1839](https://github.com/danielmiessler/Fabric/pull/1839) by [ksylvan](https://github.com/ksylvan): Add GitHub Models Provider and Refactor Fetching Fallback Logic

- Add GitHub Models provider and refactor model fetching with direct API fallback
- Feat: add GitHub Models provider and refactor model fetching with direct API fallback
- Add GitHub Models to supported OpenAI-compatible providers list
- Implement direct HTTP fallback for non-standard model responses
- Centralize model fetching logic in openai package
@@ -395,38 +477,35 @@ Co-Authored-By: Warp <agent@warp.dev>

### PR [#1840](https://github.com/danielmiessler/Fabric/pull/1840) by [ZackaryWelch](https://github.com/ZackaryWelch): Replace deprecated bash function in completion script

- Replace deprecated bash function in completion script to use `_comp_get_words` instead of `__get_comp_words_by_ref`, fixing compatibility issues with latest bash versions and preventing script breakage on updated distributions like Fedora 42+
- Replace deprecated bash function in completion script to use `_comp_get_words` instead of the removed `__get_comp_words_by_ref` function
- Fix compatibility issues with latest bash version 5.2 and newer distributions like Fedora 42+

## v1.4.329 (2025-11-20)

-### PR [#1838](https://github.com/danielmiessler/fabric/pull/1838) by [ksylvan](https://github.com/ksylvan): refactor: implement i18n support for YouTube tool error messages
+### PR [#1838](https://github.com/danielmiessler/Fabric/pull/1838) by [ksylvan](https://github.com/ksylvan): refactor: implement i18n support for YouTube tool error messages

- Refactor: implement i18n support for YouTube tool error messages
- Replace hardcoded error strings with i18n translation calls
- Add localization keys for YouTube errors to all locale files
- Introduce `extractAndValidateVideoId` helper to reduce code duplication
- Update timestamp parsing logic to handle localized error formats
- Standardize error handling in `yt-dlp` execution with i18n

## v1.4.328 (2025-11-18)

### PR [#1836](https://github.com/danielmiessler/Fabric/pull/1836) by [ksylvan](https://github.com/ksylvan): docs: clarify `--raw` flag behavior for OpenAI and Anthropic providers

- Update `--raw` flag description across all documentation files
- Clarify flag only affects OpenAI-compatible providers behavior
- Document Anthropic models use smart parameter selection
- Remove outdated reference to system/user role changes
- Update help text in CLI flags definition
- Updated documentation to clarify `--raw` flag behavior across OpenAI and Anthropic providers
- Documented that Anthropic models use smart parameter selection instead of raw flag behavior
- Updated CLI help text and shell completion descriptions for better clarity
- Translated updated flag descriptions to all supported locales
- Removed outdated references to system/user role changes

### Direct commits

- Added concall_summery

## v1.4.327 (2025-11-16)

### PR [#1831](https://github.com/danielmiessler/Fabric/pull/1831) by [ksylvan](https://github.com/ksylvan): Remove `get_youtube_rss` pattern

- Chore: remove `get_youtube_rss` pattern from multiple files
- Remove `get_youtube_rss` from `pattern_explanations.md`
- Delete `get_youtube_rss` entry in `pattern_descriptions.json`
- Delete `get_youtube_rss` entry in `pattern_extracts.json`
- Remove `get_youtube_rss` from `suggest_pattern/system.md`

### PR [#1832](https://github.com/danielmiessler/Fabric/pull/1832) by [ksylvan](https://github.com/ksylvan): Improve channel management in Gemini provider

- Fix: improve channel management in Gemini streaming method
@@ -435,29 +514,29 @@ Co-Authored-By: Warp <agent@warp.dev>

- Remove redundant channel close statements from loop
- Ensure channel closes on all exit paths consistently

### PR [#1831](https://github.com/danielmiessler/Fabric/pull/1831) by [ksylvan](https://github.com/ksylvan): Remove `get_youtube_rss` pattern

- Chore: remove `get_youtube_rss` pattern from multiple files
- Remove `get_youtube_rss` from `pattern_explanations.md`
- Delete `get_youtube_rss` entry in `pattern_descriptions.json`
- Delete `get_youtube_rss` entry in `pattern_extracts.json`
- Remove `get_youtube_rss` from `suggest_pattern/system.md`

## v1.4.326 (2025-11-16)

### PR [#1830](https://github.com/danielmiessler/Fabric/pull/1830) by [ksylvan](https://github.com/ksylvan): Ensure final newline in model generated outputs

- Feat: ensure newline in `CreateOutputFile` and improve tests
- Add newline to `CreateOutputFile` if missing
- Use `t.Cleanup` for file removal in tests
- Add test for message with trailing newline
- Introduce `printedStream` flag in `Chatter.Send`
- Add newline to `CreateOutputFile` if missing and improve tests with `t.Cleanup` for file removal
- Add test for message with trailing newline and introduce `printedStream` flag in `Chatter.Send`
- Print newline if stream printed without trailing newline

### Direct commits

- Chore: update README with recent features and extensions
- Add v1.4.322 release with concept maps
- Introduce WELLNESS category with psychological analysis
- Upgrade to Claude Sonnet 4.5
- Add Portuguese language variants with BCP 47 support
- Migrate to `openai-go/azure` SDK for Azure
- Add Extensions section to README navigation
- Add v1.4.322 release with concept maps and introduce WELLNESS category with psychological analysis
- Upgrade to Claude Sonnet 4.5 and add Portuguese language variants with BCP 47 support
- Migrate to `openai-go/azure` SDK for Azure integration
- Update README with recent features and extensions, including new Extensions section navigation
- General repository maintenance and feature documentation updates

## v1.4.325 (2025-11-15)
@@ -467,21 +546,27 @@ Co-Authored-By: Warp <agent@warp.dev>

- Remove default space in `BuildSession` message content
- Trim whitespace in `anthropic` message content check
- Trim whitespace in `gemini` message content check
- Chore: incoming 1828 changelog entry

## v1.4.324 (2025-11-14)

### PR [#1827](https://github.com/danielmiessler/Fabric/pull/1827) by [ksylvan](https://github.com/ksylvan): Make YouTube API key optional in setup

- Make YouTube API key optional in setup process
- Change API key setup question to optional configuration
- Add test for optional API key behavior
- Ensure plugin configuration works without API key
- Made YouTube API key optional during setup process
- Changed API key setup question to be optional rather than required
- Added test coverage for optional API key behavior
- Ensured plugin configuration works without API key
- Added changelog entry for the changes

## v1.4.323 (2025-11-12)

### PR [#1802](https://github.com/danielmiessler/Fabric/pull/1802) by [nickarino](https://github.com/nickarino): fix: improve template extension handling for {{input}} and add examples

- Fix: improve template extension handling for {{input}} and add examples
- Extract InputSentinel constant to shared constants.go file and remove duplicate inputSentinel definitions from template.go and patterns.go
- Create withTestExtension helper function to reduce test code duplication and refactor 3 test functions to use the helper
- Fix shell script to use $@ instead of $- for proper argument quoting
- Add prominent warning at top of Extensions guide with visual indicators and update main README with brief Extensions section

### PR [#1823](https://github.com/danielmiessler/Fabric/pull/1823) by [ksylvan](https://github.com/ksylvan): Add missing patterns and renumber pattern explanations list
@@ -489,14 +574,17 @@ Co-Authored-By: Warp <agent@warp.dev>

- Add `extract_mcp_servers` pattern for MCP server identification
- Add `generate_code_rules` pattern for AI coding guardrails
- Add `t_check_dunning_kruger` pattern for competence assessment
- Renumber all patterns from 37-226 to 37-230

### Direct commits

- Chore: incoming 1823 changelog entry
- Renumber all patterns from 37-226 to 37-230 and insert new patterns at positions 37, 129, 153, 203

## v1.4.322 (2025-11-05)

### PR [#1816](https://github.com/danielmiessler/Fabric/pull/1816) by [ksylvan](https://github.com/ksylvan): Update `anthropic-sdk-go` to v1.16.0 and update models

- Upgrade `anthropic-sdk-go` to version 1.16.0
- Remove outdated model `ModelClaude3_5SonnetLatest`
- Add new model `ModelClaudeSonnet4_5_20250929`
- Include `ModelClaudeSonnet4_5_20250929` in `modelBetas` map

### PR [#1814](https://github.com/danielmiessler/Fabric/pull/1814) by [ksylvan](https://github.com/ksylvan): Add Concept Map in html

- Add `create_conceptmap` for interactive HTML concept maps using Vis.js
@@ -504,71 +592,60 @@ Co-Authored-By: Warp <agent@warp.dev>

- Introduce `model_as_sherlock_freud` for psychological modeling and behavior analysis
- Implement `predict_person_actions` for behavioral response predictions
- Add `recommend_yoga_practice` for personalized yoga guidance
- Credit goes to @FELIPEGUEDESBR for the pattern

### PR [#1816](https://github.com/danielmiessler/Fabric/pull/1816) by [ksylvan](https://github.com/ksylvan): Update `anthropic-sdk-go` to v1.16.0 and update models

- Upgraded `anthropic-sdk-go` from v1.13.0 to v1.16.0
- Removed outdated model `ModelClaude3_5SonnetLatest`
- Added new model `ModelClaudeSonnet4_5_20250929`
- Updated anthropic beta map to include the new model
- Updated dependencies in `go.sum` file

## v1.4.321 (2025-11-03)

-### PR [#1803](https://github.com/danielmiessler/Fabric/pull/1803) by [dependabot[bot][bot]](https://github.com/apps/dependabot): chore(deps-dev): bump vite from 5.4.20 to 5.4.21 in /web in the npm_and_yarn group across 1 directory
+### PR [#1803](https://github.com/danielmiessler/Fabric/pull/1803) by [dependabot[bot]](https://github.com/apps/dependabot): chore(deps-dev): bump vite from 5.4.20 to 5.4.21 in /web in the npm_and_yarn group across 1 directory

- Updated Vite development dependency from version 5.4.20 to 5.4.21 in the web directory
- Bumped vite dependency from 5.4.20 to 5.4.21 in the /web directory

### PR [#1805](https://github.com/danielmiessler/Fabric/pull/1805) by [OmriH-Elister](https://github.com/OmriH-Elister): Added several new patterns

- Added new WELLNESS category with four patterns including personalized yoga practice recommendations and wellness guidance
- Added `model_as_sherlock_freud` pattern for psychological detective analysis combining Sherlock Holmes deduction with Freudian psychology
- Added `predict_person_actions` pattern for behavioral response predictions based on personality analysis
- Added `fix_typos` pattern for automated proofreading and typo corrections
- Updated ANALYSIS and SELF categories to include new wellness-related patterns and classifications
- Added new WELLNESS category with four patterns including yoga practice recommendations
- Introduced psychological analysis patterns: `model_as_sherlock_freud` and `predict_person_actions`
- Added `fix_typos` pattern for proofreading and text corrections
- Updated ANALYSIS and SELF categories to include new wellness-related patterns

### PR [#1808](https://github.com/danielmiessler/Fabric/pull/1808) by [sluosapher](https://github.com/sluosapher): Updated create_newsletter_entry pattern to generate more factual titles

- Updated the title generation style; added an output example.
- Updated title generation style for more factual newsletter entries and added output example

## v1.4.320 (2025-10-28)

### PR [#1780](https://github.com/danielmiessler/Fabric/pull/1780) by [marcas756](https://github.com/marcas756): feat: add extract_characters pattern

- Define character extraction goals and steps with canonical naming and deduplication rules
- Outline interaction mapping and narrative importance analysis
- Provide comprehensive output schema with proper formatting guidelines
- Include positive and negative examples for pattern clarity
- Enforce restrictions on speculative motivations and non-actor inclusion

### PR [#1794](https://github.com/danielmiessler/Fabric/pull/1794) by [starfish456](https://github.com/starfish456): Enhance web app docs

- Remove duplicate content from the main readme and link to the web app readme
- Update table of contents with proper nesting and fix minor formatting issues

### PR [#1810](https://github.com/danielmiessler/Fabric/pull/1810) by [tonymet](https://github.com/tonymet): improve subtitle lang, retry, debugging & error handling

- Improve subtitle lang, retry, debugging & error handling

### PR [#1780](https://github.com/danielmiessler/Fabric/pull/1780) by [marcas756](https://github.com/marcas756): feat: add extract_characters pattern

- Add extract_characters pattern for detailed character analysis and identification
- Define character extraction goals with canonical naming and deduplication rules
- Include output schema with formatting guidelines and positive/negative examples

### PR [#1794](https://github.com/danielmiessler/Fabric/pull/1794) by [productStripesAdmin](https://github.com/productStripesAdmin): Enhance web app docs

- Remove duplicate content from main readme and link to web app readme
- Update table of contents with proper nesting and fix minor formatting issues

### Direct commits

- Docs: clean up README - remove duplicate image and add collapsible updates section

  - Remove duplicate fabric-summarize.png screenshot
  - Wrap Updates section in HTML details/summary accordion to save space

  🤖 Generated with [Claude Code](https://claude.com/claude-code)

  Co-Authored-By: Claude <noreply@anthropic.com>

- Updated CSE pattern.
- Add new patterns and update title generation style with output examples
- Fix template extension handling for {{input}} and add examples

## v1.4.319 (2025-09-30)

### PR [#1783](https://github.com/danielmiessler/Fabric/pull/1783) by [ksylvan](https://github.com/ksylvan): Update anthropic-sdk-go and add claude-sonnet-4-5

- Feat: update `anthropic-sdk-go` to v1.13.0 and add new model
- Upgrade `anthropic-sdk-go` to version 1.13.0
- Add `ModelClaudeSonnet4_5` to supported models list
- Updated `anthropic-sdk-go` to version 1.13.0 for improved compatibility and performance
- Added support for `ModelClaudeSonnet4_5` to the list of available AI models

### Direct commits

- Added new `extract_characters` system definition with comprehensive character extraction capabilities
- Implemented canonical naming and deduplication rules for consistent character identification
- Created structured output schema with detailed formatting guidelines and examples
- Established interaction mapping functionality to track character relationships and narrative importance
- Added fallback handling for scenarios where no characters are found in the content

## v1.4.318 (2025-09-24)
@@ -594,28 +671,19 @@ Co-Authored-By: Claude <noreply@anthropic.com>

### PR [#1777](https://github.com/danielmiessler/Fabric/pull/1777) by [ksylvan](https://github.com/ksylvan): chore: remove garble installation from release workflow

- Remove garble installation step from release workflow
- Add comment for GoReleaser config file reference link
- The original idea of adding garble was to make it pass virus scanning during version upgrades for Winget, and this was a failed experiment.
- Remove garble installation step from release workflow to simplify the build process
- Add comment with GoReleaser config file reference link for better documentation
- Discontinue failed experiment with garble that was intended to improve Windows package manager virus scanning compatibility

## v1.4.315 (2025-09-20)

-### Direct commits
+### PR [#1776](https://github.com/danielmiessler/Fabric/pull/1776) by [ksylvan](https://github.com/ksylvan): Remove garble from the build process for Windows

- Chore: update CI workflow and simplify goreleaser build configuration
- Add changelog database to git tracking
- Remove unnecessary goreleaser comments
- Add version metadata to default build
- Rename windows build from garbled to standard
- Remove garble obfuscation from windows build
- Standardize ldflags across all build targets
- Inject version info during compilation
- Update CI workflow and simplify goreleaser build configuration
- Add changelog database to git tracking

## v1.4.314 (2025-09-17)
README.md (123 lines changed)
@@ -160,6 +160,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r

- [Docker](#docker)
- [Environment Variables](#environment-variables)
- [Setup](#setup)
+- [Supported AI Providers](#supported-ai-providers)
- [Per-Pattern Model Mapping](#per-pattern-model-mapping)
- [Add aliases for all patterns](#add-aliases-for-all-patterns)
- [Save your files in markdown using aliases](#save-your-files-in-markdown-using-aliases)

@@ -172,12 +173,15 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r

- [Fish Completion](#fish-completion)
- [Usage](#usage)
- [Debug Levels](#debug-levels)
+- [Dry Run Mode](#dry-run-mode)
- [Extensions](#extensions)
- [REST API Server](#rest-api-server)
+- [Ollama Compatibility Mode](#ollama-compatibility-mode)
- [Our approach to prompting](#our-approach-to-prompting)
- [Examples](#examples)
- [Just use the Patterns](#just-use-the-patterns)
- [Prompt Strategies](#prompt-strategies)
+- [Available Strategies](#available-strategies)
- [Custom Patterns](#custom-patterns)
- [Setting Up Custom Patterns](#setting-up-custom-patterns)
- [Using Custom Patterns](#using-custom-patterns)

@@ -185,7 +189,8 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r

- [Helper Apps](#helper-apps)
- [`to_pdf`](#to_pdf)
- [`to_pdf` Installation](#to_pdf-installation)
-- [`code_helper`](#code_helper)
+- [`code2context`](#code2context)
+- [`generate_changelog`](#generate_changelog)
- [pbpaste](#pbpaste)
- [Web Interface (Fabric Web App)](#web-interface-fabric-web-app)
- [Meta](#meta)

@@ -349,6 +354,43 @@ fabric --setup

If everything works you are good to go.

### Supported AI Providers

Fabric supports a wide range of AI providers:

**Native Integrations:**

- OpenAI
- Anthropic (Claude)
- Google Gemini
- Ollama (local models)
- Azure OpenAI
- Amazon Bedrock
- Vertex AI
- LM Studio
- Perplexity

**OpenAI-Compatible Providers:**

- Abacus
- AIML
- Cerebras
- DeepSeek
- GitHub Models
- GrokAI
- Groq
- Langdock
- LiteLLM
- MiniMax
- Mistral
- OpenRouter
- SiliconCloud
- Together
- Venice AI
- Z AI

Run `fabric --setup` to configure your preferred provider(s), or use `fabric --listvendors` to see all available vendors.

### Per-Pattern Model Mapping

You can configure specific models for individual patterns using environment variables
@@ -705,6 +747,7 @@ Application Options:

      --yt-dlp-args=     Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')
      --thinking=        Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)
+     --show-metadata    Print metadata (input/output tokens) to stderr
      --debug=           Set debug level (0: off, 1: basic, 2: detailed, 3: trace)

Help Options:
  -h, --help             Show this help message

@@ -719,6 +762,16 @@ Use the `--debug` flag to control runtime logging:

- `2`: detailed debugging
- `3`: trace level

### Dry Run Mode

Use `--dry-run` to preview what would be sent to the AI model without making an API call:

```bash
echo "test input" | fabric --dry-run -p summarize
```

This is useful for debugging patterns, checking prompt construction, and verifying input formatting before using API credits.

### Extensions

Fabric supports extensions that can be called within patterns. See the [Extension Guide](internal/plugins/template/Examples/README.md) for complete documentation.

@@ -744,6 +797,22 @@ The server provides endpoints for:

For complete endpoint documentation, authentication setup, and usage examples, see [REST API Documentation](docs/rest-api.md).

### Ollama Compatibility Mode

Fabric can serve as a drop-in replacement for Ollama by exposing Ollama-compatible API endpoints. Start the server with:

```bash
fabric --serve --serveOllama
```

This enables the following Ollama-compatible endpoints:

- `GET /api/tags` - List available patterns as models
- `POST /api/chat` - Chat completions
- `GET /api/version` - Server version

Applications configured to use the Ollama API can point to your Fabric server instead, allowing you to use any of Fabric's supported AI providers through the Ollama interface. Patterns appear as models (e.g., `summarize:latest`).
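
As a quick illustration of pointing an Ollama-style client at Fabric, the sketch below assumes the server is listening on localhost:8080; adjust the host and port to match how you started `fabric --serve`.

```bash
# Minimal sketch: chat with the summarize pattern through the
# Ollama-compatible endpoint (localhost:8080 is an assumed address).
curl http://localhost:8080/api/chat -d '{
  "model": "summarize:latest",
  "messages": [
    { "role": "user", "content": "Fabric is an open-source framework for augmenting humans using AI." }
  ]
}'
```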

## Our approach to prompting

Fabric _Patterns_ are different than most prompts you'll see.
@@ -824,6 +893,34 @@ LLM in the chat session.

Use `fabric -S` and select the option to install the strategies in your `~/.config/fabric` directory.

#### Available Strategies

Fabric includes several prompt strategies:

- `cot` - Chain-of-Thought: Step-by-step reasoning
- `cod` - Chain-of-Draft: Iterative drafting with minimal notes (5 words max per step)
- `tot` - Tree-of-Thought: Generate multiple reasoning paths and select the best one
- `aot` - Atom-of-Thought: Break problems into smallest independent atomic sub-problems
- `ltm` - Least-to-Most: Solve problems from easiest to hardest sub-problems
- `self-consistent` - Self-Consistency: Multiple reasoning paths with consensus
- `self-refine` - Self-Refinement: Answer, critique, and refine
- `reflexion` - Reflexion: Answer, critique briefly, and provide refined answer
- `standard` - Standard: Direct answer without explanation

Use the `--strategy` flag to apply a strategy:

```bash
echo "Analyze this code" | fabric --strategy cot -p analyze_code
```

List all available strategies with:

```bash
fabric --liststrategies
```

Strategies are stored as JSON files in `~/.config/fabric/strategies/`. See the default strategies for the format specification.
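
If you want to experiment with your own, a strategy is just a small JSON file dropped into that directory. A minimal sketch (the field names here are assumptions, so compare against the shipped strategy files before relying on them):

```bash
# Hypothetical custom strategy; verify the schema against the defaults first
cat > ~/.config/fabric/strategies/three-bullets.json <<'EOF'
{
  "description": "Answer in exactly three bullet points",
  "prompt": "Think the problem through silently, then answer in exactly three bullet points."
}
EOF
```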

## Custom Patterns

You may want to use Fabric to create your own custom Patterns—but not share them with others. No problem!
@@ -903,9 +1000,9 @@ go install github.com/danielmiessler/fabric/cmd/to_pdf@latest

Make sure you have a LaTeX distribution (like TeX Live or MiKTeX) installed on your system, as `to_pdf` requires `pdflatex` to be available in your system's PATH.

### `code_helper`
### `code2context`

`code_helper` is used in conjunction with the `create_coding_feature` pattern.
`code2context` is used in conjunction with the `create_coding_feature` pattern.
It generates a `json` representation of a directory of code that can be fed into an AI model
with instructions to create a new feature or edit the code in a specified way.

@@ -914,9 +1011,27 @@ See [the Create Coding Feature Pattern README](./data/patterns/create_coding_fea
Install it first using:

```bash
go install github.com/danielmiessler/fabric/cmd/code_helper@latest
go install github.com/danielmiessler/fabric/cmd/code2context@latest
```

### `generate_changelog`

`generate_changelog` generates changelogs from git commit history and GitHub pull requests. It walks through your repository's git history, extracts PR information, and produces well-formatted markdown changelogs.

```bash
generate_changelog --help
```

Features include SQLite caching for fast incremental updates, GitHub GraphQL API integration for efficient PR fetching, and optional AI-enhanced summaries using Fabric.
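
A typical run regenerates the changelog and writes it wherever you redirect it (a sketch; check `--help` for the real flags and defaults):

```bash
# Assumes the default invocation prints the generated changelog to stdout
generate_changelog > CHANGELOG.md
```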

Install it using:

```bash
go install github.com/danielmiessler/fabric/cmd/generate_changelog@latest
```

See the [generate_changelog README](./cmd/generate_changelog/README.md) for detailed usage and options.

## pbpaste

The [examples](#examples) use the macOS program `pbpaste` to paste content from the clipboard to pipe into `fabric` as the input. `pbpaste` is not available on Windows or Linux, but there are alternatives.
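
For example, `xclip` (X11) and `wl-paste` (Wayland) are common stand-ins on Linux:

```bash
# Linux equivalents of `pbpaste | fabric ...`
xclip -selection clipboard -o | fabric -p summarize   # X11
wl-paste | fabric -p summarize                        # Wayland
```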

@@ -131,6 +131,75 @@ func ScanDirectory(rootDir string, maxDepth int, instructions string, ignoreList
	return json.MarshalIndent(data, "", " ")
}

// ScanFiles scans specific files and returns a JSON representation
func ScanFiles(files []string, instructions string) ([]byte, error) {
	fileCount := 0
	dirSet := make(map[string]bool)

	// Create root directory item
	rootItem := FileItem{
		Type:     "directory",
		Name:     ".",
		Contents: []FileItem{},
	}

	for _, filePath := range files {
		// Skip directories
		info, err := os.Stat(filePath)
		if err != nil {
			return nil, fmt.Errorf("error accessing file %s: %v", filePath, err)
		}
		if info.IsDir() {
			continue
		}

		// Track unique directories
		dir := filepath.Dir(filePath)
		if dir != "." {
			dirSet[dir] = true
		}

		fileCount++

		// Read file content
		content, err := os.ReadFile(filePath)
		if err != nil {
			return nil, fmt.Errorf("error reading file %s: %v", filePath, err)
		}

		// Clean path for consistent handling
		cleanPath := filepath.Clean(filePath)
		if strings.HasPrefix(cleanPath, "./") {
			cleanPath = cleanPath[2:]
		}

		// Add file to the structure
		addFileToDirectory(&rootItem, cleanPath, string(content), ".")
	}

	// Create final data structure
	var data []any
	data = append(data, rootItem)

	// Add report
	reportItem := map[string]any{
		"type":        "report",
		"directories": len(dirSet) + 1,
		"files":       fileCount,
	}
	data = append(data, reportItem)

	// Add instructions
	instructionsItem := map[string]any{
		"type":    "instructions",
		"name":    "code_change_instructions",
		"details": instructions,
	}
	data = append(data, instructionsItem)

	return json.MarshalIndent(data, "", " ")
}

// addFileToDirectory adds a file to the correct directory in the structure
func addFileToDirectory(root *FileItem, path, content, rootDir string) {
	parts := strings.Split(path, string(filepath.Separator))

cmd/code2context/code_test.go (new file, 100 lines)
@@ -0,0 +1,100 @@
package main

import (
	"encoding/json"
	"os"
	"path/filepath"
	"testing"

	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestScanFiles(t *testing.T) {
	// Create temp directory with test files
	tmpDir := t.TempDir()

	// Create test files
	file1 := filepath.Join(tmpDir, "test1.go")
	file2 := filepath.Join(tmpDir, "test2.go")
	subDir := filepath.Join(tmpDir, "subdir")
	file3 := filepath.Join(subDir, "test3.go")

	require.NoError(t, os.WriteFile(file1, []byte("package main\n"), 0644))
	require.NoError(t, os.WriteFile(file2, []byte("package main\n\nfunc main() {}\n"), 0644))
	require.NoError(t, os.MkdirAll(subDir, 0755))
	require.NoError(t, os.WriteFile(file3, []byte("package subdir\n"), 0644))

	// Test scanning specific files
	files := []string{file1, file3}
	instructions := "Test instructions"

	jsonData, err := ScanFiles(files, instructions)
	require.NoError(t, err)

	// Parse the JSON output
	var result []any
	err = json.Unmarshal(jsonData, &result)
	require.NoError(t, err)
	assert.Len(t, result, 3) // directory, report, instructions

	// Check report
	report := result[1].(map[string]any)
	assert.Equal(t, "report", report["type"])
	assert.Equal(t, float64(2), report["files"])

	// Check instructions
	instr := result[2].(map[string]any)
	assert.Equal(t, "instructions", instr["type"])
	assert.Equal(t, "Test instructions", instr["details"])
}

func TestScanFilesSkipsDirectories(t *testing.T) {
	tmpDir := t.TempDir()

	file1 := filepath.Join(tmpDir, "test.go")
	subDir := filepath.Join(tmpDir, "subdir")

	require.NoError(t, os.WriteFile(file1, []byte("package main\n"), 0644))
	require.NoError(t, os.MkdirAll(subDir, 0755))

	// Include a directory in the file list - should be skipped
	files := []string{file1, subDir}

	jsonData, err := ScanFiles(files, "test")
	require.NoError(t, err)

	var result []any
	err = json.Unmarshal(jsonData, &result)
	require.NoError(t, err)

	// Check that only 1 file was counted (directory was skipped)
	report := result[1].(map[string]any)
	assert.Equal(t, float64(1), report["files"])
}

func TestScanFilesNonExistentFile(t *testing.T) {
	files := []string{"/nonexistent/file.go"}
	_, err := ScanFiles(files, "test")
	assert.Error(t, err)
	assert.Contains(t, err.Error(), "error accessing file")
}

func TestScanDirectory(t *testing.T) {
	tmpDir := t.TempDir()

	file1 := filepath.Join(tmpDir, "main.go")
	require.NoError(t, os.WriteFile(file1, []byte("package main\n"), 0644))

	jsonData, err := ScanDirectory(tmpDir, 3, "Test instructions", []string{})
	require.NoError(t, err)

	var result []any
	err = json.Unmarshal(jsonData, &result)
	require.NoError(t, err)
	assert.Len(t, result, 3)

	// Check instructions
	instr := result[2].(map[string]any)
	assert.Equal(t, "Test instructions", instr["details"])
}

cmd/code2context/main.go (new file, 109 lines)
@@ -0,0 +1,109 @@
package main

import (
	"bufio"
	"flag"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Command line flags
	maxDepth := flag.Int("depth", 3, "Maximum directory depth to scan")
	ignorePatterns := flag.String("ignore", ".git,node_modules,vendor", "Comma-separated patterns to ignore")
	outputFile := flag.String("out", "", "Output file (default: stdout)")
	flag.Usage = printUsage
	flag.Parse()

	// Check if stdin has data (is a pipe)
	stdinInfo, _ := os.Stdin.Stat()
	hasStdin := (stdinInfo.Mode() & os.ModeCharDevice) == 0

	var jsonData []byte
	var err error

	if hasStdin {
		// Stdin mode: read file list from stdin, instructions from argument
		if flag.NArg() != 1 {
			fmt.Fprintf(os.Stderr, "Error: When piping file list via stdin, provide exactly 1 argument: <instructions>\n")
			fmt.Fprintf(os.Stderr, "Usage: find . -name '*.go' | code2context \"instructions\"\n")
			os.Exit(1)
		}

		instructions := flag.Arg(0)

		// Read file paths from stdin
		var files []string
		scanner := bufio.NewScanner(os.Stdin)
		for scanner.Scan() {
			line := strings.TrimSpace(scanner.Text())
			if line != "" {
				files = append(files, line)
			}
		}
		if err := scanner.Err(); err != nil {
			fmt.Fprintf(os.Stderr, "Error reading stdin: %v\n", err)
			os.Exit(1)
		}

		if len(files) == 0 {
			fmt.Fprintf(os.Stderr, "Error: No files provided via stdin\n")
			os.Exit(1)
		}

		jsonData, err = ScanFiles(files, instructions)
	} else {
		// Directory mode: require directory and instructions arguments
		if flag.NArg() != 2 {
			printUsage()
			os.Exit(1)
		}

		directory := flag.Arg(0)
		instructions := flag.Arg(1)

		// Validate directory
		if info, err := os.Stat(directory); err != nil || !info.IsDir() {
			fmt.Fprintf(os.Stderr, "Error: Directory '%s' does not exist or is not a directory\n", directory)
			os.Exit(1)
		}

		// Parse ignore patterns and scan directory
		jsonData, err = ScanDirectory(directory, *maxDepth, instructions, strings.Split(*ignorePatterns, ","))
	}

	if err != nil {
		fmt.Fprintf(os.Stderr, "Error scanning: %v\n", err)
		os.Exit(1)
	}

	// Output result
	if *outputFile != "" {
		if err := os.WriteFile(*outputFile, jsonData, 0644); err != nil {
			fmt.Fprintf(os.Stderr, "Error writing file: %v\n", err)
			os.Exit(1)
		}
	} else {
		fmt.Print(string(jsonData))
	}
}

func printUsage() {
	fmt.Fprintf(os.Stderr, `code2context - Code project scanner for use with Fabric AI

Usage:
  code2context [options] <directory> <instructions>
  <file_list> | code2context [options] <instructions>

Examples:
  code2context . "Add input validation to all user inputs"
  code2context -depth 4 ./my-project "Implement error handling"
  code2context -out project.json ./src "Fix security issues"
  find . -name '*.go' | code2context "Refactor error handling"
  git ls-files '*.py' | code2context "Add type hints"

Options:
`)
	flag.PrintDefaults()
}
cmd/code_helper/main.go (deleted file)
@@ -1,65 +0,0 @@
package main

import (
	"flag"
	"fmt"
	"os"
	"strings"
)

func main() {
	// Command line flags
	maxDepth := flag.Int("depth", 3, "Maximum directory depth to scan")
	ignorePatterns := flag.String("ignore", ".git,node_modules,vendor", "Comma-separated patterns to ignore")
	outputFile := flag.String("out", "", "Output file (default: stdout)")
	flag.Usage = printUsage
	flag.Parse()

	// Require exactly two positional arguments: directory and instructions
	if flag.NArg() != 2 {
		printUsage()
		os.Exit(1)
	}

	directory := flag.Arg(0)
	instructions := flag.Arg(1)

	// Validate directory
	if info, err := os.Stat(directory); err != nil || !info.IsDir() {
		fmt.Fprintf(os.Stderr, "Error: Directory '%s' does not exist or is not a directory\n", directory)
		os.Exit(1)
	}

	// Parse ignore patterns and scan directory
	jsonData, err := ScanDirectory(directory, *maxDepth, instructions, strings.Split(*ignorePatterns, ","))
	if err != nil {
		fmt.Fprintf(os.Stderr, "Error scanning directory: %v\n", err)
		os.Exit(1)
	}

	// Output result
	if *outputFile != "" {
		if err := os.WriteFile(*outputFile, jsonData, 0644); err != nil {
			fmt.Fprintf(os.Stderr, "Error writing file: %v\n", err)
			os.Exit(1)
		}
	} else {
		fmt.Print(string(jsonData))
	}
}

func printUsage() {
	fmt.Fprintf(os.Stderr, `code_helper - Code project scanner for use with Fabric AI

Usage:
  code_helper [options] <directory> <instructions>

Examples:
  code_helper . "Add input validation to all user inputs"
  code_helper -depth 4 ./my-project "Implement error handling"
  code_helper -out project.json ./src "Fix security issues"

Options:
`)
	flag.PrintDefaults()
}

@@ -1,3 +1,3 @@
package main

var version = "v1.4.366"
var version = "v1.4.375"

Binary file not shown.

cmd/generate_changelog/internal/cache/cache.go (vendored, 21 lines changed)
@@ -202,14 +202,23 @@ func (c *Cache) GetVersions() (map[string]*git.Version, error) {
	}

	if dateStr.Valid {
		// Try RFC3339Nano first (for nanosecond precision), then fall back to RFC3339
		v.Date, err = time.Parse(time.RFC3339Nano, dateStr.String)
		if err != nil {
			v.Date, err = time.Parse(time.RFC3339, dateStr.String)
			if err != nil {
				fmt.Fprintf(os.Stderr, "Error parsing date '%s' for version '%s': %v. Expected format: RFC3339 or RFC3339Nano.\n", dateStr.String, v.Name, err)
		// Try multiple date formats: SQLite format, RFC3339Nano, and RFC3339
		dateFormats := []string{
			"2006-01-02 15:04:05-07:00",           // SQLite DATETIME format
			"2006-01-02 15:04:05.999999999-07:00", // SQLite with fractional seconds
			time.RFC3339Nano,
			time.RFC3339,
		}
		var parseErr error
		for _, format := range dateFormats {
			v.Date, parseErr = time.Parse(format, dateStr.String)
			if parseErr == nil {
				break // Successfully parsed
			}
		}
		if parseErr != nil {
			fmt.Fprintf(os.Stderr, "Error parsing date '%s' for version '%s': %v\n", dateStr.String, v.Name, parseErr)
		}
	}

	if prNumbersJSON != "" {

@@ -470,7 +470,8 @@ func (g *Generator) generateRawVersionContent(version *git.Version) string {
	}

	// There are occasionally no PRs or direct commits other than version bumps, so we handle that gracefully
	if len(prCommits) == 0 && len(directCommits) == 0 {
	// However, don't return early if we have PRs to output from version.PRNumbers
	if len(prCommits) == 0 && len(directCommits) == 0 && len(version.PRNumbers) == 0 {
		return ""
	}

@@ -284,6 +284,20 @@ func (g *Generator) CreateNewChangelogEntry(version string) error {
		}
	}

	// Update metadata before staging changes so they get committed together
	if g.cache != nil {
		// Update last_processed_tag to the version we just processed
		if err := g.cache.SetLastProcessedTag(version); err != nil {
			fmt.Fprintf(os.Stderr, "Warning: Failed to update last_processed_tag: %v\n", err)
		}

		// Update last_pr_sync to the version date (not current time)
		// This ensures future runs will fetch PRs merged after this version
		if err := g.cache.SetLastPRSync(versionDate); err != nil {
			fmt.Fprintf(os.Stderr, "Warning: Failed to update last_pr_sync: %v\n", err)
		}
	}

	if err := g.stageChangesForRelease(); err != nil {
		return fmt.Errorf("critical: failed to stage changes for release: %w", err)
	}

@@ -4,10 +4,10 @@ Generate code changes to an existing coding project using AI.

## Installation

After installing the `code_helper` binary:
After installing the `code2context` binary:

```bash
go install github.com/danielmiessler/fabric/cmd/code_helper@latest
go install github.com/danielmiessler/fabric/cmd/code2context@latest
```

## Usage
@@ -15,18 +15,18 @@ go install github.com/danielmiessler/fabric/cmd/code_helper@latest
The create_coding_feature allows you to apply AI-suggested code changes directly to your project files. Use it like this:

```bash
code_helper [project_directory] "[instructions for code changes]" | fabric --pattern create_coding_feature
code2context [project_directory] "[instructions for code changes]" | fabric --pattern create_coding_feature
```

For example:

```bash
code_helper . "Create a simple Hello World C program in file main.c" | fabric --pattern create_coding_feature
code2context . "Create a simple Hello World C program in file main.c" | fabric --pattern create_coding_feature
```

## How It Works

1. `code_helper` scans your project directory and creates a JSON representation
1. `code2context` scans your project directory and creates a JSON representation
2. The AI model analyzes your project structure and instructions
3. AI generates file changes in a standard format
4. Fabric parses these changes and prompts you to confirm
@@ -36,7 +36,7 @@ code_helper . "Create a simple Hello World C program in file main.c" | fabric --

```bash
# Request AI to create a Hello World program
code_helper . "Create a simple Hello World C program in file main.c" | fabric --pattern create_coding_feature
code2context . "Create a simple Hello World C program in file main.c" | fabric --pattern create_coding_feature

# Review the changes made to your project
git diff
@@ -52,7 +52,7 @@ git commit -s -m "Add Hello World program"
### Security Enhancement Example

```bash
code_helper . "Ensure that all user input is validated and sanitized before being used in the program." | fabric --pattern create_coding_feature
code2context . "Ensure that all user input is validated and sanitized before being used in the program." | fabric --pattern create_coding_feature
git diff
make check
git add <changed files>

@@ -24,30 +24,4 @@ Take a step back and think step-by-step about how to achieve the best possible r

# INPUT

INPUT:# IDENTITY and PURPOSE

You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life.

Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.

# STEPS

- Carefully review the psychological-profile and/or psychology data file provided as input.

- Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being.

- Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement.

- Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile.

# OUTPUT INSTRUCTIONS

- Only output Markdown.

- Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate.

- Ensure you follow ALL these instructions when creating your output.

# INPUT

INPUT:
INPUT

docs/docs.go (33 lines changed)
@@ -289,6 +289,20 @@ const docTemplate = `{
                "ThinkingHigh"
            ]
        },
        "domain.UsageMetadata": {
            "type": "object",
            "properties": {
                "input_tokens": {
                    "type": "integer"
                },
                "output_tokens": {
                    "type": "integer"
                },
                "total_tokens": {
                    "type": "integer"
                }
            }
        },
        "fsdb.Pattern": {
            "type": "object",
            "properties": {
@@ -360,6 +374,9 @@ const docTemplate = `{
                    "$ref": "#/definitions/restapi.PromptRequest"
                }
            },
            "quiet": {
                "type": "boolean"
            },
            "raw": {
                "type": "boolean"
            },
@@ -372,6 +389,9 @@ const docTemplate = `{
            "seed": {
                "type": "integer"
            },
            "showMetadata": {
                "type": "boolean"
            },
            "suppressThink": {
                "type": "boolean"
            },
@@ -392,6 +412,9 @@ const docTemplate = `{
                "type": "number",
                "format": "float64"
            },
            "updateChan": {
                "type": "object"
            },
            "voice": {
                "type": "string"
            }
@@ -423,6 +446,10 @@ const docTemplate = `{
            "patternName": {
                "type": "string"
            },
            "sessionName": {
                "description": "Session name for multi-turn conversations",
                "type": "string"
            },
            "strategyName": {
                "description": "Optional strategy name",
                "type": "string"
@@ -446,7 +473,6 @@ const docTemplate = `{
            "type": "object",
            "properties": {
                "content": {
                    "description": "The actual content",
                    "type": "string"
                },
                "format": {
@@ -454,8 +480,11 @@ const docTemplate = `{
                    "type": "string"
                },
                "type": {
                    "description": "\"content\", \"error\", \"complete\"",
                    "description": "\"content\", \"usage\", \"error\", \"complete\"",
                    "type": "string"
                },
                "usage": {
                    "$ref": "#/definitions/domain.UsageMetadata"
                }
            }
        },

@@ -283,6 +283,20 @@
            "ThinkingHigh"
        ]
    },
    "domain.UsageMetadata": {
        "type": "object",
        "properties": {
            "input_tokens": {
                "type": "integer"
            },
            "output_tokens": {
                "type": "integer"
            },
            "total_tokens": {
                "type": "integer"
            }
        }
    },
    "fsdb.Pattern": {
        "type": "object",
        "properties": {
@@ -354,6 +368,9 @@
                "$ref": "#/definitions/restapi.PromptRequest"
            }
        },
        "quiet": {
            "type": "boolean"
        },
        "raw": {
            "type": "boolean"
        },
@@ -366,6 +383,9 @@
        "seed": {
            "type": "integer"
        },
        "showMetadata": {
            "type": "boolean"
        },
        "suppressThink": {
            "type": "boolean"
        },
@@ -386,6 +406,9 @@
            "type": "number",
            "format": "float64"
        },
        "updateChan": {
            "type": "object"
        },
        "voice": {
            "type": "string"
        }
@@ -417,6 +440,10 @@
        "patternName": {
            "type": "string"
        },
        "sessionName": {
            "description": "Session name for multi-turn conversations",
            "type": "string"
        },
        "strategyName": {
            "description": "Optional strategy name",
            "type": "string"
@@ -440,7 +467,6 @@
        "type": "object",
        "properties": {
            "content": {
                "description": "The actual content",
                "type": "string"
            },
            "format": {
@@ -448,8 +474,11 @@
                "type": "string"
            },
            "type": {
                "description": "\"content\", \"error\", \"complete\"",
                "description": "\"content\", \"usage\", \"error\", \"complete\"",
                "type": "string"
            },
            "usage": {
                "$ref": "#/definitions/domain.UsageMetadata"
            }
        }
    },

@@ -12,6 +12,15 @@ definitions:
    - ThinkingLow
    - ThinkingMedium
    - ThinkingHigh
  domain.UsageMetadata:
    properties:
      input_tokens:
        type: integer
      output_tokens:
        type: integer
      total_tokens:
        type: integer
    type: object
  fsdb.Pattern:
    properties:
      description:
@@ -60,6 +69,8 @@ definitions:
        items:
          $ref: '#/definitions/restapi.PromptRequest'
        type: array
      quiet:
        type: boolean
      raw:
        type: boolean
      search:
@@ -68,6 +79,8 @@ definitions:
        type: string
      seed:
        type: integer
      showMetadata:
        type: boolean
      suppressThink:
        type: boolean
      temperature:
@@ -82,6 +95,8 @@ definitions:
      topP:
        format: float64
        type: number
      updateChan:
        type: object
      voice:
        type: string
    type: object
@@ -102,6 +117,9 @@ definitions:
        type: string
      patternName:
        type: string
      sessionName:
        description: Session name for multi-turn conversations
        type: string
      strategyName:
        description: Optional strategy name
        type: string
@@ -118,14 +136,15 @@ definitions:
  restapi.StreamResponse:
    properties:
      content:
        description: The actual content
        type: string
      format:
        description: '"markdown", "mermaid", "plain"'
        type: string
      type:
        description: '"content", "error", "complete"'
        description: '"content", "usage", "error", "complete"'
        type: string
      usage:
        $ref: '#/definitions/domain.UsageMetadata'
    type: object
  restapi.YouTubeRequest:
    properties:

@@ -104,6 +104,7 @@ type Flags struct {
	Notification        bool                 `long:"notification" yaml:"notification" description:"Send desktop notification when command completes"`
	NotificationCommand string               `long:"notification-command" yaml:"notificationCommand" description:"Custom command to run for notifications (overrides built-in notifications)"`
	Thinking            domain.ThinkingLevel `long:"thinking" yaml:"thinking" description:"Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)"`
	ShowMetadata        bool                 `long:"show-metadata" description:"Print metadata to stderr"`
	Debug               int                  `long:"debug" description:"Set debug level (0=off, 1=basic, 2=detailed, 3=trace)" default:"0"`
}

@@ -459,6 +460,7 @@ func (o *Flags) BuildChatOptions() (ret *domain.ChatOptions, err error) {
		Voice:               o.Voice,
		Notification:        o.Notification || o.NotificationCommand != "",
		NotificationCommand: o.NotificationCommand,
		ShowMetadata:        o.ShowMetadata,
	}
	return
}

@@ -64,7 +64,7 @@ func (o *Chatter) Send(request *domain.ChatRequest, opts *domain.ChatOptions) (s
	message := ""

	if o.Stream {
		responseChan := make(chan string)
		responseChan := make(chan domain.StreamUpdate)
		errChan := make(chan error, 1)
		done := make(chan struct{})
		printedStream := false
@@ -76,15 +76,31 @@ func (o *Chatter) Send(request *domain.ChatRequest, opts *domain.ChatOptions) (s
			}
		}()

		for response := range responseChan {
			message += response
			if !opts.SuppressThink {
				fmt.Print(response)
				printedStream = true
		for update := range responseChan {
			if opts.UpdateChan != nil {
				opts.UpdateChan <- update
			}
			switch update.Type {
			case domain.StreamTypeContent:
				message += update.Content
				if !opts.SuppressThink && !opts.Quiet {
					fmt.Print(update.Content)
					printedStream = true
				}
			case domain.StreamTypeUsage:
				if opts.ShowMetadata && update.Usage != nil && !opts.Quiet {
					fmt.Fprintf(os.Stderr, "\n[Metadata] Input: %d | Output: %d | Total: %d\n",
						update.Usage.InputTokens, update.Usage.OutputTokens, update.Usage.TotalTokens)
				}
			case domain.StreamTypeError:
				if !opts.Quiet {
					fmt.Fprintf(os.Stderr, "Error: %s\n", update.Content)
				}
				errChan <- errors.New(update.Content)
			}
		}

		if printedStream && !opts.SuppressThink && !strings.HasSuffix(message, "\n") {
		if printedStream && !opts.SuppressThink && !strings.HasSuffix(message, "\n") && !opts.Quiet {
			fmt.Println()
		}

@@ -14,7 +14,7 @@ import (
// mockVendor implements the ai.Vendor interface for testing
type mockVendor struct {
	sendStreamError error
	streamChunks    []string
	streamChunks    []domain.StreamUpdate
	sendFunc        func(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error)
}

@@ -45,7 +45,7 @@ func (m *mockVendor) ListModels() ([]string, error) {
	return []string{"test-model"}, nil
}

func (m *mockVendor) SendStream(messages []*chat.ChatCompletionMessage, opts *domain.ChatOptions, responseChan chan string) error {
func (m *mockVendor) SendStream(messages []*chat.ChatCompletionMessage, opts *domain.ChatOptions, responseChan chan domain.StreamUpdate) error {
	// Send chunks if provided (for successful streaming test)
	if m.streamChunks != nil {
		for _, chunk := range m.streamChunks {
@@ -169,7 +169,11 @@ func TestChatter_Send_StreamingSuccessfulAggregation(t *testing.T) {
	db := fsdb.NewDb(tempDir)

	// Create test chunks that should be aggregated
	testChunks := []string{"Hello", " ", "world", "!", " This", " is", " a", " test."}
	chunks := []string{"Hello", " ", "world", "!", " This", " is", " a", " test."}
	testChunks := make([]domain.StreamUpdate, len(chunks))
	for i, c := range chunks {
		testChunks[i] = domain.StreamUpdate{Type: domain.StreamTypeContent, Content: c}
	}
	expectedMessage := "Hello world! This is a test."

	// Create a mock vendor that will send chunks successfully
@@ -228,3 +232,83 @@ func TestChatter_Send_StreamingSuccessfulAggregation(t *testing.T) {
		t.Errorf("Expected aggregated message %q, got %q", expectedMessage, assistantMessage.Content)
	}
}

func TestChatter_Send_StreamingMetadataPropagation(t *testing.T) {
	// Create a temporary database for testing
	tempDir := t.TempDir()
	db := fsdb.NewDb(tempDir)

	// Create test chunks: one content, one usage metadata
	testChunks := []domain.StreamUpdate{
		{
			Type:    domain.StreamTypeContent,
			Content: "Test content",
		},
		{
			Type: domain.StreamTypeUsage,
			Usage: &domain.UsageMetadata{
				InputTokens:  10,
				OutputTokens: 5,
				TotalTokens:  15,
			},
		},
	}

	// Create a mock vendor
	mockVendor := &mockVendor{
		sendStreamError: nil,
		streamChunks:    testChunks,
	}

	// Create chatter with streaming enabled
	chatter := &Chatter{
		db:     db,
		Stream: true,
		vendor: mockVendor,
		model:  "test-model",
	}

	// Create a test request
	request := &domain.ChatRequest{
		Message: &chat.ChatCompletionMessage{
			Role:    chat.ChatMessageRoleUser,
			Content: "test message",
		},
	}

	// Create an update channel to capture stream events
	updateChan := make(chan domain.StreamUpdate, 10)

	// Create test options with UpdateChan
	opts := &domain.ChatOptions{
		Model:      "test-model",
		UpdateChan: updateChan,
		Quiet:      true, // Suppress stdout/stderr
	}

	// Call Send
	_, err := chatter.Send(request, opts)
	if err != nil {
		t.Fatalf("Expected no error, but got: %v", err)
	}
	close(updateChan)

	// Verify we received the metadata event
	var usageReceived bool
	for update := range updateChan {
		if update.Type == domain.StreamTypeUsage {
			usageReceived = true
			if update.Usage == nil {
				t.Error("Expected usage metadata to be non-nil")
			} else {
				if update.Usage.TotalTokens != 15 {
					t.Errorf("Expected 15 total tokens, got %d", update.Usage.TotalTokens)
				}
			}
		}
	}

	if !usageReceived {
		t.Error("Expected to receive a usage metadata update, but didn't")
	}
}

@@ -43,7 +43,7 @@ func (m *testVendor) Configure() error { return nil }
func (m *testVendor) Setup() error                          { return nil }
func (m *testVendor) SetupFillEnvFileContent(*bytes.Buffer) {}
func (m *testVendor) ListModels() ([]string, error)         { return m.models, nil }
func (m *testVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan string) error {
func (m *testVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan domain.StreamUpdate) error {
	return nil
}
func (m *testVendor) Send(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error) {

@@ -51,6 +51,9 @@ type ChatOptions struct {
	Voice               string
	Notification        bool
	NotificationCommand string
	ShowMetadata        bool
	Quiet               bool
	UpdateChan          chan StreamUpdate
}

// NormalizeMessages remove empty messages and ensure messages order user-assist-user

internal/domain/stream.go (new file, 24 lines)
@@ -0,0 +1,24 @@
package domain

// StreamType distinguishes between partial text content and metadata events.
type StreamType string

const (
	StreamTypeContent StreamType = "content"
	StreamTypeUsage   StreamType = "usage"
	StreamTypeError   StreamType = "error"
)

// StreamUpdate is the unified payload sent through the internal channels.
type StreamUpdate struct {
	Type    StreamType     `json:"type"`
	Content string         `json:"content,omitempty"` // For text deltas
	Usage   *UsageMetadata `json:"usage,omitempty"`   // For token counts
}

// UsageMetadata normalizes token counts across different providers.
type UsageMetadata struct {
	InputTokens  int `json:"input_tokens"`
	OutputTokens int `json:"output_tokens"`
	TotalTokens  int `json:"total_tokens"`
}

@@ -184,7 +184,7 @@ func parseThinking(level domain.ThinkingLevel) (anthropic.ThinkingConfigParamUni
}

func (an *Client) SendStream(
	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string,
	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate,
) (err error) {
	messages := an.toMessages(msgs)
	if len(messages) == 0 {
@@ -210,9 +210,33 @@ func (an *Client) SendStream(
	for stream.Next() {
		event := stream.Current()

		// directly send any non-empty delta text
		// Handle Content
		if event.Delta.Text != "" {
			channel <- event.Delta.Text
			channel <- domain.StreamUpdate{
				Type:    domain.StreamTypeContent,
				Content: event.Delta.Text,
			}
		}

		// Handle Usage
		if event.Message.Usage.InputTokens != 0 || event.Message.Usage.OutputTokens != 0 {
			channel <- domain.StreamUpdate{
				Type: domain.StreamTypeUsage,
				Usage: &domain.UsageMetadata{
					InputTokens:  int(event.Message.Usage.InputTokens),
					OutputTokens: int(event.Message.Usage.OutputTokens),
					TotalTokens:  int(event.Message.Usage.InputTokens + event.Message.Usage.OutputTokens),
				},
			}
		} else if event.Usage.InputTokens != 0 || event.Usage.OutputTokens != 0 {
			channel <- domain.StreamUpdate{
				Type: domain.StreamTypeUsage,
				Usage: &domain.UsageMetadata{
					InputTokens:  int(event.Usage.InputTokens),
					OutputTokens: int(event.Usage.OutputTokens),
					TotalTokens:  int(event.Usage.InputTokens + event.Usage.OutputTokens),
				},
			}
		}
	}

@@ -154,7 +154,7 @@ func (c *BedrockClient) ListModels() ([]string, error) {
}

// SendStream sends the messages to the Bedrock ConverseStream API
func (c *BedrockClient) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
func (c *BedrockClient) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) (err error) {
	// Ensure channel is closed on all exit paths to prevent goroutine leaks
	defer func() {
		if r := recover(); r != nil {
@@ -186,18 +186,35 @@ func (c *BedrockClient) SendStream(msgs []*chat.ChatCompletionMessage, opts *dom
		case *types.ConverseStreamOutputMemberContentBlockDelta:
			text, ok := v.Value.Delta.(*types.ContentBlockDeltaMemberText)
			if ok {
				channel <- text.Value
				channel <- domain.StreamUpdate{
					Type:    domain.StreamTypeContent,
					Content: text.Value,
				}
			}

		case *types.ConverseStreamOutputMemberMessageStop:
			channel <- "\n"
			channel <- domain.StreamUpdate{
				Type:    domain.StreamTypeContent,
				Content: "\n",
			}
			return nil // Let defer handle the close

		case *types.ConverseStreamOutputMemberMetadata:
			if v.Value.Usage != nil {
				channel <- domain.StreamUpdate{
					Type: domain.StreamTypeUsage,
					Usage: &domain.UsageMetadata{
						InputTokens:  int(*v.Value.Usage.InputTokens),
						OutputTokens: int(*v.Value.Usage.OutputTokens),
						TotalTokens:  int(*v.Value.Usage.TotalTokens),
					},
				}
			}

		// Unused Events
		case *types.ConverseStreamOutputMemberMessageStart,
			*types.ConverseStreamOutputMemberContentBlockStart,
			*types.ConverseStreamOutputMemberContentBlockStop,
			*types.ConverseStreamOutputMemberMetadata:
			*types.ConverseStreamOutputMemberContentBlockStop:

		default:
			return fmt.Errorf("unknown stream event type: %T", v)

@@ -108,12 +108,30 @@ func (c *Client) constructRequest(msgs []*chat.ChatCompletionMessage, opts *doma
	return builder.String()
}

func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) error {
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) error {
	defer close(channel)
	request := c.constructRequest(msgs, opts)
	channel <- request
	channel <- "\n"
	channel <- DryRunResponse
	channel <- domain.StreamUpdate{
		Type:    domain.StreamTypeContent,
		Content: request,
	}
	channel <- domain.StreamUpdate{
		Type:    domain.StreamTypeContent,
		Content: "\n",
	}
	channel <- domain.StreamUpdate{
		Type:    domain.StreamTypeContent,
		Content: DryRunResponse,
	}
	// Simulated usage
	channel <- domain.StreamUpdate{
		Type: domain.StreamTypeUsage,
		Usage: &domain.UsageMetadata{
			InputTokens:  100,
			OutputTokens: 50,
			TotalTokens:  150,
		},
	}
	return nil
}

@@ -39,7 +39,7 @@ func TestSendStream_SendsMessages(t *testing.T) {
	opts := &domain.ChatOptions{
		Model: "dry-run-model",
	}
	channel := make(chan string)
	channel := make(chan domain.StreamUpdate)
	go func() {
		err := client.SendStream(msgs, opts, channel)
		if err != nil {
@@ -48,7 +48,7 @@ func TestSendStream_SendsMessages(t *testing.T) {
	}()
	var receivedMessages []string
	for msg := range channel {
		receivedMessages = append(receivedMessages, msg)
		receivedMessages = append(receivedMessages, msg.Content)
	}
	if len(receivedMessages) == 0 {
		t.Errorf("Expected to receive messages, but got none")

@@ -10,9 +10,9 @@ import (
	"strings"

	"github.com/danielmiessler/fabric/internal/chat"
	"github.com/danielmiessler/fabric/internal/plugins"

	"github.com/danielmiessler/fabric/internal/domain"
	"github.com/danielmiessler/fabric/internal/plugins"
	"github.com/danielmiessler/fabric/internal/plugins/ai/geminicommon"
	"google.golang.org/genai"
)

@@ -29,10 +29,6 @@ (
)

const (
	citationHeader    = "\n\n## Sources\n\n"
	citationSeparator = "\n"
	citationFormat    = "- [%s](%s)"

	errInvalidLocationFormat = "invalid search location format %q: must be timezone (e.g., 'America/Los_Angeles') or language code (e.g., 'en-US')"
	locationSeparator        = "/"
	langCodeSeparator        = "_"
@@ -111,7 +107,7 @@ func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
	}

	// Convert messages to new SDK format
	contents := o.convertMessages(msgs)
	contents := geminicommon.ConvertMessages(msgs)

	cfg, err := o.buildGenerateContentConfig(opts)
	if err != nil {
@@ -125,11 +121,11 @@
	}

	// Extract text from response
	ret = o.extractTextFromResponse(response)
	ret = geminicommon.ExtractTextWithCitations(response)
	return
}

func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) (err error) {
	ctx := context.Background()
	defer close(channel)

@@ -142,7 +138,7 @@ func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
	}

	// Convert messages to new SDK format
	contents := o.convertMessages(msgs)
	contents := geminicommon.ConvertMessages(msgs)

	cfg, err := o.buildGenerateContentConfig(opts)
	if err != nil {
@@ -154,13 +150,30 @@ func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha

	for response, err := range stream {
		if err != nil {
			channel <- fmt.Sprintf("Error: %v\n", err)
			channel <- domain.StreamUpdate{
				Type:    domain.StreamTypeError,
				Content: fmt.Sprintf("Error: %v", err),
			}
			return err
		}

		text := o.extractTextFromResponse(response)
		text := geminicommon.ExtractTextWithCitations(response)
		if text != "" {
			channel <- text
			channel <- domain.StreamUpdate{
				Type:    domain.StreamTypeContent,
				Content: text,
			}
		}

		if response.UsageMetadata != nil {
			channel <- domain.StreamUpdate{
				Type: domain.StreamTypeUsage,
				Usage: &domain.UsageMetadata{
					InputTokens:  int(response.UsageMetadata.PromptTokenCount),
					OutputTokens: int(response.UsageMetadata.CandidatesTokenCount),
					TotalTokens:  int(response.UsageMetadata.TotalTokenCount),
				},
			}
		}
	}

@@ -201,10 +214,14 @@ func parseThinkingConfig(level domain.ThinkingLevel) (*genai.ThinkingConfig, boo
func (o *Client) buildGenerateContentConfig(opts *domain.ChatOptions) (*genai.GenerateContentConfig, error) {
	temperature := float32(opts.Temperature)
	topP := float32(opts.TopP)
	var maxTokens int32
	if opts.MaxTokens > 0 {
		maxTokens = int32(opts.MaxTokens)
	}
	cfg := &genai.GenerateContentConfig{
		Temperature:     &temperature,
		TopP:            &topP,
		MaxOutputTokens: int32(opts.ModelContextLength),
		MaxOutputTokens: maxTokens,
	}

	if opts.Search {
@@ -435,113 +452,3 @@ func (o *Client) generateWAVFile(pcmData []byte) ([]byte, error) {

	return result, nil
}

// convertMessages converts fabric chat messages to genai Content format
func (o *Client) convertMessages(msgs []*chat.ChatCompletionMessage) []*genai.Content {
	var contents []*genai.Content

	for _, msg := range msgs {
		content := &genai.Content{Parts: []*genai.Part{}}

		switch msg.Role {
		case chat.ChatMessageRoleAssistant:
			content.Role = "model"
		case chat.ChatMessageRoleUser:
			content.Role = "user"
		case chat.ChatMessageRoleSystem, chat.ChatMessageRoleDeveloper, chat.ChatMessageRoleFunction, chat.ChatMessageRoleTool:
			// Gemini's API only accepts "user" and "model" roles.
			// Map all other roles to "user" to preserve instruction context.
			content.Role = "user"
		default:
			content.Role = "user"
		}

		if strings.TrimSpace(msg.Content) != "" {
			content.Parts = append(content.Parts, &genai.Part{Text: msg.Content})
		}

		// Handle multi-content messages (images, etc.)
		for _, part := range msg.MultiContent {
			switch part.Type {
			case chat.ChatMessagePartTypeText:
				content.Parts = append(content.Parts, &genai.Part{Text: part.Text})
			case chat.ChatMessagePartTypeImageURL:
				// TODO: Handle image URLs if needed
				// This would require downloading and converting to inline data
			}
		}

		contents = append(contents, content)
	}

	return contents
}

// extractTextFromResponse extracts text content from the response and appends
// any web citations in a standardized format.
func (o *Client) extractTextFromResponse(response *genai.GenerateContentResponse) string {
	if response == nil {
		return ""
	}

	text := o.extractTextParts(response)
	citations := o.extractCitations(response)
	if len(citations) > 0 {
		return text + citationHeader + strings.Join(citations, citationSeparator)
	}
	return text
}

func (o *Client) extractTextParts(response *genai.GenerateContentResponse) string {
	var builder strings.Builder
	for _, candidate := range response.Candidates {
		if candidate == nil || candidate.Content == nil {
			continue
		}
		for _, part := range candidate.Content.Parts {
			if part != nil && part.Text != "" {
				builder.WriteString(part.Text)
			}
		}
	}
	return builder.String()
}

func (o *Client) extractCitations(response *genai.GenerateContentResponse) []string {
	if response == nil || len(response.Candidates) == 0 {
		return nil
	}

	citationMap := make(map[string]bool)
	var citations []string
	for _, candidate := range response.Candidates {
		if candidate == nil || candidate.GroundingMetadata == nil {
			continue
		}
		chunks := candidate.GroundingMetadata.GroundingChunks
		if len(chunks) == 0 {
			continue
		}
		for _, chunk := range chunks {
			if chunk == nil || chunk.Web == nil {
				continue
			}
			uri := chunk.Web.URI
			title := chunk.Web.Title
			if uri == "" || title == "" {
				continue
			}
			var keyBuilder strings.Builder
			keyBuilder.WriteString(uri)
			keyBuilder.WriteByte('|')
			keyBuilder.WriteString(title)
			key := keyBuilder.String()
			if !citationMap[key] {
				citationMap[key] = true
				citationText := fmt.Sprintf(citationFormat, title, uri)
				citations = append(citations, citationText)
			}
		}
	}
	return citations
}

@@ -4,10 +4,10 @@ import (
	"strings"
	"testing"

	"google.golang.org/genai"

	"github.com/danielmiessler/fabric/internal/chat"
	"github.com/danielmiessler/fabric/internal/domain"
	"github.com/danielmiessler/fabric/internal/plugins/ai/geminicommon"
	"google.golang.org/genai"
)

// Test buildModelNameFull method
@@ -31,9 +31,8 @@
	}
}

// Test extractTextFromResponse method
// Test ExtractTextWithCitations from geminicommon
func TestExtractTextFromResponse(t *testing.T) {
	client := &Client{}
	response := &genai.GenerateContentResponse{
		Candidates: []*genai.Candidate{
			{
@@ -48,7 +47,7 @@
	}
	expected := "Hello, world!"

	result := client.extractTextFromResponse(response)
	result := geminicommon.ExtractTextWithCitations(response)

	if result != expected {
		t.Errorf("Expected %v, got %v", expected, result)
@@ -56,14 +55,12 @@
}

func TestExtractTextFromResponse_Nil(t *testing.T) {
	client := &Client{}
	if got := client.extractTextFromResponse(nil); got != "" {
	if got := geminicommon.ExtractTextWithCitations(nil); got != "" {
		t.Fatalf("expected empty string, got %q", got)
	}
}

func TestExtractTextFromResponse_EmptyGroundingChunks(t *testing.T) {
	client := &Client{}
	response := &genai.GenerateContentResponse{
		Candidates: []*genai.Candidate{
			{
@@ -72,7 +69,7 @@
			},
		},
	}
	if got := client.extractTextFromResponse(response); got != "Hello" {
	if got := geminicommon.ExtractTextWithCitations(response); got != "Hello" {
		t.Fatalf("expected 'Hello', got %q", got)
	}
}
@@ -162,7 +159,6 @@ func TestBuildGenerateContentConfig_ThinkingTokens(t *testing.T) {
}

func TestCitationFormatting(t *testing.T) {
	client := &Client{}
	response := &genai.GenerateContentResponse{
		Candidates: []*genai.Candidate{
			{
@@ -178,7 +174,7 @@
		},
	}

	result := client.extractTextFromResponse(response)
	result := geminicommon.ExtractTextWithCitations(response)
	if !strings.Contains(result, "## Sources") {
		t.Fatalf("expected sources section in result: %s", result)
	}
@@ -189,14 +185,13 @@

// Test convertMessages handles role mapping correctly
func TestConvertMessagesRoles(t *testing.T) {
	client := &Client{}
	msgs := []*chat.ChatCompletionMessage{
		{Role: chat.ChatMessageRoleUser, Content: "user"},
		{Role: chat.ChatMessageRoleAssistant, Content: "assistant"},
		{Role: chat.ChatMessageRoleSystem, Content: "system"},
	}

	contents := client.convertMessages(msgs)
	contents := geminicommon.ConvertMessages(msgs)

	expected := []string{"user", "model", "user"}

internal/plugins/ai/geminicommon/geminicommon.go (new file, 130 lines)

```go
// Package geminicommon provides shared utilities for Gemini API integrations.
// Used by both the standalone Gemini provider (API key auth) and VertexAI provider (ADC auth).
package geminicommon

import (
	"fmt"
	"strings"

	"github.com/danielmiessler/fabric/internal/chat"
	"google.golang.org/genai"
)

// Citation formatting constants
const (
	CitationHeader    = "\n\n## Sources\n\n"
	CitationSeparator = "\n"
	CitationFormat    = "- [%s](%s)"
)

// ConvertMessages converts fabric chat messages to genai Content format.
// Gemini's API only accepts "user" and "model" roles, so other roles are mapped to "user".
func ConvertMessages(msgs []*chat.ChatCompletionMessage) []*genai.Content {
	var contents []*genai.Content

	for _, msg := range msgs {
		content := &genai.Content{Parts: []*genai.Part{}}

		switch msg.Role {
		case chat.ChatMessageRoleAssistant:
			content.Role = "model"
		case chat.ChatMessageRoleUser:
			content.Role = "user"
		case chat.ChatMessageRoleSystem, chat.ChatMessageRoleDeveloper, chat.ChatMessageRoleFunction, chat.ChatMessageRoleTool:
			// Gemini's API only accepts "user" and "model" roles.
			// Map all other roles to "user" to preserve instruction context.
			content.Role = "user"
		default:
			content.Role = "user"
		}

		if strings.TrimSpace(msg.Content) != "" {
			content.Parts = append(content.Parts, &genai.Part{Text: msg.Content})
		}

		// Handle multi-content messages (images, etc.)
		for _, part := range msg.MultiContent {
			switch part.Type {
			case chat.ChatMessagePartTypeText:
				content.Parts = append(content.Parts, &genai.Part{Text: part.Text})
			case chat.ChatMessagePartTypeImageURL:
				// TODO: Handle image URLs if needed
				// This would require downloading and converting to inline data
			}
		}

		contents = append(contents, content)
	}

	return contents
}

// ExtractText extracts just the text parts from a Gemini response.
func ExtractText(response *genai.GenerateContentResponse) string {
	if response == nil {
		return ""
	}

	var builder strings.Builder
	for _, candidate := range response.Candidates {
		if candidate == nil || candidate.Content == nil {
			continue
		}
		for _, part := range candidate.Content.Parts {
			if part != nil && part.Text != "" {
				builder.WriteString(part.Text)
			}
		}
	}
	return builder.String()
}

// ExtractTextWithCitations extracts text content from the response and appends
// any web citations in a standardized format.
func ExtractTextWithCitations(response *genai.GenerateContentResponse) string {
	if response == nil {
		return ""
	}

	text := ExtractText(response)
	citations := ExtractCitations(response)
	if len(citations) > 0 {
		return text + CitationHeader + strings.Join(citations, CitationSeparator)
	}
	return text
}

// ExtractCitations extracts web citations from grounding metadata.
func ExtractCitations(response *genai.GenerateContentResponse) []string {
	if response == nil || len(response.Candidates) == 0 {
		return nil
	}

	citationMap := make(map[string]bool)
	var citations []string
	for _, candidate := range response.Candidates {
		if candidate == nil || candidate.GroundingMetadata == nil {
			continue
		}
		chunks := candidate.GroundingMetadata.GroundingChunks
		if len(chunks) == 0 {
			continue
		}
		for _, chunk := range chunks {
			if chunk == nil || chunk.Web == nil {
				continue
			}
			uri := chunk.Web.URI
			title := chunk.Web.Title
			if uri == "" || title == "" {
				continue
			}
			key := uri + "|" + title
			if !citationMap[key] {
				citationMap[key] = true
				citations = append(citations, fmt.Sprintf(CitationFormat, title, uri))
			}
		}
	}
	return citations
}
```
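To make the shared package's contract concrete, here is a minimal usage sketch. The `genai.GenerateContentResponse` literal is a hand-built stand-in for a real API response, used only for illustration; field names follow the genai types the package itself iterates over.

```go
package main

import (
	"fmt"

	"github.com/danielmiessler/fabric/internal/chat"
	"github.com/danielmiessler/fabric/internal/plugins/ai/geminicommon"
	"google.golang.org/genai"
)

func main() {
	msgs := []*chat.ChatCompletionMessage{
		{Role: chat.ChatMessageRoleSystem, Content: "You are terse."},
		{Role: chat.ChatMessageRoleUser, Content: "Summarize Go channels."},
	}

	// Both the system and the user message map to the "user" role.
	for _, c := range geminicommon.ConvertMessages(msgs) {
		fmt.Println(c.Role, len(c.Parts))
	}

	// A hand-built response: with no grounding metadata attached,
	// ExtractTextWithCitations returns just the concatenated text.
	resp := &genai.GenerateContentResponse{
		Candidates: []*genai.Candidate{{
			Content: &genai.Content{Parts: []*genai.Part{{Text: "Channels synchronize goroutines."}}},
		}},
	}
	fmt.Println(geminicommon.ExtractTextWithCitations(resp))
}
```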
```diff
@@ -87,13 +87,16 @@ func (c *Client) ListModels() ([]string, error) {
 	return models, nil
 }
 
-func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
+func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) (err error) {
 	url := fmt.Sprintf("%s/chat/completions", c.ApiUrl.Value)
 
 	payload := map[string]any{
 		"messages": msgs,
 		"model":    opts.Model,
 		"stream":   true, // Enable streaming
+		"stream_options": map[string]any{
+			"include_usage": true,
+		},
 	}
 
 	var jsonPayload []byte
@@ -144,7 +147,7 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 			line = after
 		}
 
-		if string(line) == "[DONE]" {
+		if string(bytes.TrimSpace(line)) == "[DONE]" {
 			break
 		}
 
@@ -153,6 +156,24 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 			continue
 		}
 
+		// Handle Usage
+		if usage, ok := result["usage"].(map[string]any); ok {
+			var metadata domain.UsageMetadata
+			if val, ok := usage["prompt_tokens"].(float64); ok {
+				metadata.InputTokens = int(val)
+			}
+			if val, ok := usage["completion_tokens"].(float64); ok {
+				metadata.OutputTokens = int(val)
+			}
+			if val, ok := usage["total_tokens"].(float64); ok {
+				metadata.TotalTokens = int(val)
+			}
+			channel <- domain.StreamUpdate{
+				Type:  domain.StreamTypeUsage,
+				Usage: &metadata,
+			}
+		}
+
 		var choices []any
 		var ok bool
 		if choices, ok = result["choices"].([]any); !ok || len(choices) == 0 {
@@ -166,7 +187,10 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 
 		var content string
 		if content, _ = delta["content"].(string); content != "" {
-			channel <- content
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: content,
+			}
 		}
 	}
```
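The new usage branch expects the final OpenAI-compatible SSE chunk to carry a `usage` object (which `stream_options.include_usage` requests). A standalone sketch of that parse, with a hand-written chunk standing in for real server output:

```go
package main

import (
	"encoding/json"
	"fmt"
)

func main() {
	// A hand-written final chunk; compatible servers send one like it
	// when stream_options.include_usage is set.
	line := []byte(`{"choices":[],"usage":{"prompt_tokens":12,"completion_tokens":34,"total_tokens":46}}`)

	var result map[string]any
	if err := json.Unmarshal(line, &result); err != nil {
		panic(err)
	}
	if usage, ok := result["usage"].(map[string]any); ok {
		// JSON numbers decode into interface{} as float64, hence the casts.
		in, _ := usage["prompt_tokens"].(float64)
		out, _ := usage["completion_tokens"].(float64)
		total, _ := usage["total_tokens"].(float64)
		fmt.Printf("in=%d out=%d total=%d\n", int(in), int(out), int(total))
	}
}
```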
```diff
@@ -106,7 +106,7 @@ func (o *Client) ListModels() (ret []string, err error) {
 	return
 }
 
-func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
+func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) (err error) {
 	ctx := context.Background()
 
 	var req ollamaapi.ChatRequest
@@ -115,7 +115,21 @@ func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 	}
 
 	respFunc := func(resp ollamaapi.ChatResponse) (streamErr error) {
-		channel <- resp.Message.Content
+		channel <- domain.StreamUpdate{
+			Type:    domain.StreamTypeContent,
+			Content: resp.Message.Content,
+		}
+
+		if resp.Done {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  resp.PromptEvalCount,
+					OutputTokens: resp.EvalCount,
+					TotalTokens:  resp.PromptEvalCount + resp.EvalCount,
+				},
+			}
+		}
 		return
 	}
```
```diff
@@ -30,7 +30,7 @@ func (o *Client) sendChatCompletions(ctx context.Context, msgs []*chat.ChatCompl
 
 // sendStreamChatCompletions sends a streaming request using the Chat Completions API
 func (o *Client) sendStreamChatCompletions(
-	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string,
+	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate,
 ) (err error) {
 	defer close(channel)
 
@@ -39,11 +39,28 @@ func (o *Client) sendStreamChatCompletions(
 	for stream.Next() {
 		chunk := stream.Current()
 		if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
-			channel <- chunk.Choices[0].Delta.Content
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: chunk.Choices[0].Delta.Content,
+			}
 		}
+
+		if chunk.Usage.TotalTokens > 0 {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  int(chunk.Usage.PromptTokens),
+					OutputTokens: int(chunk.Usage.CompletionTokens),
+					TotalTokens:  int(chunk.Usage.TotalTokens),
+				},
+			}
+		}
 	}
 	if stream.Err() == nil {
-		channel <- "\n"
+		channel <- domain.StreamUpdate{
+			Type:    domain.StreamTypeContent,
+			Content: "\n",
+		}
 	}
 	return stream.Err()
 }
@@ -65,6 +82,9 @@ func (o *Client) buildChatCompletionParams(
 	ret = openai.ChatCompletionNewParams{
 		Model:    shared.ChatModel(opts.Model),
 		Messages: messages,
+		StreamOptions: openai.ChatCompletionStreamOptionsParam{
+			IncludeUsage: openai.Bool(true),
+		},
 	}
 
 	if !opts.Raw {
```
```diff
@@ -108,7 +108,7 @@ func (o *Client) ListModels() (ret []string, err error) {
 }
 
 func (o *Client) SendStream(
-	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string,
+	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate,
 ) (err error) {
 	// Use Responses API for OpenAI, Chat Completions API for other providers
 	if o.supportsResponsesAPI() {
@@ -118,7 +118,7 @@ func (o *Client) SendStream(
 }
 
 func (o *Client) sendStreamResponses(
-	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string,
+	msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate,
 ) (err error) {
 	defer close(channel)
 
@@ -128,7 +128,10 @@ func (o *Client) sendStreamResponses(
 		event := stream.Current()
 		switch event.Type {
 		case string(constant.ResponseOutputTextDelta("").Default()):
-			channel <- event.AsResponseOutputTextDelta().Delta
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: event.AsResponseOutputTextDelta().Delta,
+			}
 		case string(constant.ResponseOutputTextDone("").Default()):
 			// The Responses API sends the full text again in the
 			// final "done" event. Since we've already streamed all
@@ -138,7 +141,10 @@ func (o *Client) sendStreamResponses(
 		}
 	}
 	if stream.Err() == nil {
-		channel <- "\n"
+		channel <- domain.StreamUpdate{
+			Type:    domain.StreamTypeContent,
+			Content: "\n",
+		}
 	}
 	return stream.Err()
 }
```
```diff
@@ -10,12 +10,20 @@ import (
 	"slices"
+	"sort"
 	"strings"
+	"sync"
 
 	debuglog "github.com/danielmiessler/fabric/internal/log"
 
 	openai "github.com/openai/openai-go"
 )
 
+// transcriptionResult holds the result of a single chunk transcription.
+type transcriptionResult struct {
+	index int
+	text  string
+	err   error
+}
+
 // MaxAudioFileSize defines the maximum allowed size for audio uploads (25MB).
 const MaxAudioFileSize int64 = 25 * 1024 * 1024
 
@@ -73,27 +81,56 @@ func (o *Client) TranscribeFile(ctx context.Context, filePath, model string, spl
 		files = []string{filePath}
 	}
 
-	var builder strings.Builder
+	resultsChan := make(chan transcriptionResult, len(files))
+	var wg sync.WaitGroup
+
 	for i, f := range files {
-		debuglog.Log("Using model %s to transcribe part %d (file name: %s)...\n", model, i+1, f)
-		var chunk *os.File
-		if chunk, err = os.Open(f); err != nil {
-			return "", err
-		}
-		params := openai.AudioTranscriptionNewParams{
-			File:  chunk,
-			Model: openai.AudioModel(model),
-		}
-		var resp *openai.Transcription
-		resp, err = o.ApiClient.Audio.Transcriptions.New(ctx, params)
-		chunk.Close()
-		if err != nil {
-			return "", err
-		}
+		wg.Add(1)
+		go func(index int, filePath string) {
+			defer wg.Done()
+			debuglog.Log("Using model %s to transcribe part %d (file name: %s)...\n", model, index+1, filePath)
+
+			chunk, openErr := os.Open(filePath)
+			if openErr != nil {
+				resultsChan <- transcriptionResult{index: index, err: openErr}
+				return
+			}
+			defer chunk.Close()
+
+			params := openai.AudioTranscriptionNewParams{
+				File:  chunk,
+				Model: openai.AudioModel(model),
+			}
+			resp, transcribeErr := o.ApiClient.Audio.Transcriptions.New(ctx, params)
+			if transcribeErr != nil {
+				resultsChan <- transcriptionResult{index: index, err: transcribeErr}
+				return
+			}
+			resultsChan <- transcriptionResult{index: index, text: resp.Text}
+		}(i, f)
 	}
 
+	wg.Wait()
+	close(resultsChan)
+
+	results := make([]transcriptionResult, 0, len(files))
+	for result := range resultsChan {
+		if result.err != nil {
+			return "", result.err
+		}
+		results = append(results, result)
+	}
+
+	sort.Slice(results, func(i, j int) bool {
+		return results[i].index < results[j].index
+	})
+
+	var builder strings.Builder
+	for i, result := range results {
 		if i > 0 {
 			builder.WriteString(" ")
 		}
-		builder.WriteString(resp.Text)
+		builder.WriteString(result.text)
 	}
 
 	return builder.String(), nil
```
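The order-preserving fan-out above is a general pattern: tag each result with its input index, collect everything, then sort before joining. A stripped-down sketch of the same idea, independent of the OpenAI client (the `work` function is a placeholder for the slow call):

```go
package main

import (
	"fmt"
	"sort"
	"strings"
	"sync"
)

type indexed struct {
	index int
	text  string
}

func main() {
	inputs := []string{"alpha", "beta", "gamma"}
	work := func(s string) string { return strings.ToUpper(s) } // placeholder

	results := make(chan indexed, len(inputs))
	var wg sync.WaitGroup
	for i, in := range inputs {
		wg.Add(1)
		go func(i int, in string) {
			defer wg.Done()
			results <- indexed{index: i, text: work(in)}
		}(i, in)
	}
	wg.Wait()
	close(results)

	var collected []indexed
	for r := range results {
		collected = append(collected, r)
	}
	// Goroutines finish in arbitrary order; sorting by index restores input order.
	sort.Slice(collected, func(a, b int) bool { return collected[a].index < collected[b].index })

	var parts []string
	for _, r := range collected {
		parts = append(parts, r.text)
	}
	fmt.Println(strings.Join(parts, " ")) // ALPHA BETA GAMMA
}
```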
```diff
@@ -123,7 +123,7 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
 	return content.String(), nil
 }
 
-func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) error {
+func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) error {
 	if c.client == nil {
 		if err := c.Configure(); err != nil {
 			close(channel) // Ensure channel is closed on error
@@ -196,7 +196,21 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 			content = resp.Choices[0].Message.Content
 		}
 		if content != "" {
-			channel <- content
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: content,
+			}
 		}
+
+		if resp.Usage.TotalTokens != 0 {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  int(resp.Usage.PromptTokens),
+					OutputTokens: int(resp.Usage.CompletionTokens),
+					TotalTokens:  int(resp.Usage.TotalTokens),
+				},
+			}
+		}
 	}
@@ -205,9 +219,14 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 	if lastResponse != nil {
 		citations := lastResponse.GetCitations()
 		if len(citations) > 0 {
-			channel <- "\n\n# CITATIONS\n\n"
+			var citationsText strings.Builder
+			citationsText.WriteString("\n\n# CITATIONS\n\n")
 			for i, citation := range citations {
-				channel <- fmt.Sprintf("- [%d] %s\n", i+1, citation)
+				citationsText.WriteString(fmt.Sprintf("- [%d] %s\n", i+1, citation))
 			}
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: citationsText.String(),
+			}
 		}
 	}
```
```diff
@@ -12,7 +12,7 @@ import (
 type Vendor interface {
 	plugins.Plugin
 	ListModels() ([]string, error)
-	SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan string) error
+	SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan domain.StreamUpdate) error
 	Send(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error)
 	NeedsRawMode(modelName string) bool
 }
@@ -20,7 +20,7 @@ func (v *stubVendor) Configure() error { return nil }
 func (v *stubVendor) Setup() error { return nil }
 func (v *stubVendor) SetupFillEnvFileContent(*bytes.Buffer) {}
 func (v *stubVendor) ListModels() ([]string, error) { return nil, nil }
-func (v *stubVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan string) error {
+func (v *stubVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan domain.StreamUpdate) error {
 	return nil
 }
 func (v *stubVendor) Send(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error) {
```
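Every vendor now writes `domain.StreamUpdate` values instead of raw strings, so a consumer can separate text, usage, and errors in one receive loop. A minimal consumer sketch, assuming only the `StreamUpdate` shape visible in the diffs above (a `Type` discriminator plus `Content` and an optional `Usage`):

```go
package main

import (
	"fmt"
	"os"

	"github.com/danielmiessler/fabric/internal/domain"
)

// consume drains a vendor stream: content is printed as it arrives,
// usage is remembered and reported after the sender closes the channel.
func consume(ch chan domain.StreamUpdate) {
	var usage *domain.UsageMetadata
	for update := range ch {
		switch update.Type {
		case domain.StreamTypeContent:
			fmt.Print(update.Content)
		case domain.StreamTypeUsage:
			usage = update.Usage
		case domain.StreamTypeError:
			fmt.Fprintln(os.Stderr, update.Content)
		}
	}
	if usage != nil {
		fmt.Printf("\n[%d in / %d out / %d total tokens]\n",
			usage.InputTokens, usage.OutputTokens, usage.TotalTokens)
	}
}

func main() {
	// A hand-fed channel standing in for a real vendor stream.
	ch := make(chan domain.StreamUpdate, 2)
	ch <- domain.StreamUpdate{Type: domain.StreamTypeContent, Content: "hello"}
	ch <- domain.StreamUpdate{Type: domain.StreamTypeUsage, Usage: &domain.UsageMetadata{InputTokens: 3, OutputTokens: 1, TotalTokens: 4}}
	close(ch)
	consume(ch)
}
```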
internal/plugins/ai/vertexai/models.go (new file, 237 lines)

```go
package vertexai

import (
	"context"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"sort"
	"strings"

	debuglog "github.com/danielmiessler/fabric/internal/log"
)

const (
	// API limits
	maxResponseSize    = 10 * 1024 * 1024 // 10MB
	errorResponseLimit = 1024             // 1KB for error messages

	// Default region for Model Garden API (global doesn't work for this endpoint)
	defaultModelGardenRegion = "us-central1"
)

// Supported Model Garden publishers (others can be added when SDK support is implemented)
var publishers = []string{"google", "anthropic"}

// publisherModelsResponse represents the API response from publishers.models.list
type publisherModelsResponse struct {
	PublisherModels []publisherModel `json:"publisherModels"`
	NextPageToken   string           `json:"nextPageToken"`
}

// publisherModel represents a single model in the API response
type publisherModel struct {
	Name string `json:"name"` // Format: publishers/{publisher}/models/{model}
}

// fetchModelsPage makes a single API request and returns the parsed response.
// Extracted to ensure proper cleanup of HTTP response bodies in pagination loops.
func fetchModelsPage(ctx context.Context, httpClient *http.Client, url, projectID, publisher string) (*publisherModelsResponse, error) {
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, fmt.Errorf("failed to create request: %w", err)
	}

	req.Header.Set("Accept", "application/json")
	// Set quota project header required by Vertex AI API
	req.Header.Set("x-goog-user-project", projectID)

	resp, err := httpClient.Do(req)
	if err != nil {
		return nil, fmt.Errorf("request failed: %w", err)
	}
	defer resp.Body.Close()

	if resp.StatusCode != http.StatusOK {
		bodyBytes, _ := io.ReadAll(io.LimitReader(resp.Body, errorResponseLimit))
		debuglog.Debug(debuglog.Basic, "API error for %s: status %d, url: %s, body: %s\n", publisher, resp.StatusCode, url, string(bodyBytes))
		return nil, fmt.Errorf("API returned status %d: %s", resp.StatusCode, string(bodyBytes))
	}

	bodyBytes, err := io.ReadAll(io.LimitReader(resp.Body, maxResponseSize+1))
	if err != nil {
		return nil, fmt.Errorf("failed to read response: %w", err)
	}

	if len(bodyBytes) > maxResponseSize {
		return nil, fmt.Errorf("response too large (>%d bytes)", maxResponseSize)
	}

	var response publisherModelsResponse
	if err := json.Unmarshal(bodyBytes, &response); err != nil {
		return nil, fmt.Errorf("failed to parse response: %w", err)
	}

	return &response, nil
}

// listPublisherModels fetches models from a specific publisher via the Model Garden API
func listPublisherModels(ctx context.Context, httpClient *http.Client, region, projectID, publisher string) ([]string, error) {
	// Use default region if global or empty (Model Garden API requires a specific region)
	if region == "" || region == "global" {
		region = defaultModelGardenRegion
	}

	baseURL := fmt.Sprintf("https://%s-aiplatform.googleapis.com/v1beta1/publishers/%s/models", region, publisher)

	var allModels []string
	pageToken := ""

	for {
		url := baseURL
		if pageToken != "" {
			url = fmt.Sprintf("%s?pageToken=%s", baseURL, pageToken)
		}

		response, err := fetchModelsPage(ctx, httpClient, url, projectID, publisher)
		if err != nil {
			return nil, err
		}

		// Extract model names, stripping the publishers/{publisher}/models/ prefix
		for _, model := range response.PublisherModels {
			modelName := extractModelName(model.Name)
			if modelName != "" {
				allModels = append(allModels, modelName)
			}
		}

		// Check for more pages
		if response.NextPageToken == "" {
			break
		}
		pageToken = response.NextPageToken
	}

	debuglog.Debug(debuglog.Detailed, "Listed %d models from publisher %s\n", len(allModels), publisher)
	return allModels, nil
}

// extractModelName extracts the model name from the full resource path.
// Input: "publishers/google/models/gemini-2.0-flash"
// Output: "gemini-2.0-flash"
func extractModelName(fullName string) string {
	parts := strings.Split(fullName, "/")
	if len(parts) >= 4 && parts[0] == "publishers" && parts[2] == "models" {
		return parts[3]
	}
	// Fallback: return the last segment
	if len(parts) > 0 {
		return parts[len(parts)-1]
	}
	return fullName
}

// sortModels sorts models by priority: Gemini > Claude > Others.
// Within each group, models are sorted alphabetically.
func sortModels(models []string) []string {
	sort.Slice(models, func(i, j int) bool {
		pi := modelPriority(models[i])
		pj := modelPriority(models[j])
		if pi != pj {
			return pi < pj
		}
		// Same priority: sort alphabetically (case-insensitive)
		return strings.ToLower(models[i]) < strings.ToLower(models[j])
	})
	return models
}

// modelPriority returns the sort priority for a model (lower = higher priority)
func modelPriority(model string) int {
	lower := strings.ToLower(model)
	switch {
	case strings.HasPrefix(lower, "gemini"):
		return 1
	case strings.HasPrefix(lower, "claude"):
		return 2
	default:
		return 3
	}
}

// knownGeminiModels is a curated list of Gemini models available on Vertex AI.
// Vertex AI doesn't provide a list API for Gemini models - they must be known ahead of time.
// This list is based on Google Cloud documentation as of January 2025.
// See: https://docs.cloud.google.com/vertex-ai/generative-ai/docs/models
var knownGeminiModels = []string{
	// Gemini 3 (Preview)
	"gemini-3-pro-preview",
	"gemini-3-flash-preview",
	// Gemini 2.5 (GA)
	"gemini-2.5-pro",
	"gemini-2.5-flash",
	"gemini-2.5-flash-lite",
	// Gemini 2.0 (GA)
	"gemini-2.0-flash",
	"gemini-2.0-flash-lite",
}

// getKnownGeminiModels returns the curated list of Gemini models available on Vertex AI.
// Unlike third-party models which can be listed via the Model Garden API,
// Gemini models must be known ahead of time as there's no list endpoint for them.
func getKnownGeminiModels() []string {
	return knownGeminiModels
}

// isGeminiModel returns true if the model is a Gemini model
func isGeminiModel(modelName string) bool {
	return strings.HasPrefix(strings.ToLower(modelName), "gemini")
}

// isConversationalModel returns true if the model is suitable for text generation/chat.
// Filters out image generation, embeddings, and other non-conversational models.
func isConversationalModel(modelName string) bool {
	lower := strings.ToLower(modelName)

	// Exclude patterns for non-conversational models
	excludePatterns := []string{
		"imagen", // Image generation models
		"imagegeneration",
		"imagetext",
		"image-segmentation",
		"embedding", // Embedding models
		"textembedding",
		"multimodalembedding",
		"text-bison", // Legacy completion models (not chat)
		"text-unicorn",
		"code-bison", // Legacy code models
		"code-gecko",
		"codechat-bison", // Deprecated chat model
		"chat-bison",     // Deprecated chat model
		"veo",            // Video generation
		"chirp",          // Audio/speech models
		"medlm",          // Medical models (restricted)
		"medical",
	}

	for _, pattern := range excludePatterns {
		if strings.Contains(lower, pattern) {
			return false
		}
	}

	return true
}

// filterConversationalModels returns only models suitable for text generation/chat
func filterConversationalModels(models []string) []string {
	var filtered []string
	for _, model := range models {
		if isConversationalModel(model) {
			filtered = append(filtered, model)
		}
	}
	return filtered
}
```
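A sketch of how these helpers compose into the listing pipeline. This hypothetical `listAllModelsSketch` lives in the same package and runs sequentially for clarity; the real `ListModels` in the next diff queries publishers concurrently, and `httpClient` is assumed to carry ADC credentials.

```go
// listAllModelsSketch is a simplified, sequential version of the pipeline.
func listAllModelsSketch(ctx context.Context, httpClient *http.Client, projectID string) []string {
	var all []string
	for _, pub := range publishers { // {"google", "anthropic"}
		models, err := listPublisherModels(ctx, httpClient, "global", projectID, pub)
		if err != nil {
			continue // a publisher may be unavailable; skip it, as ListModels does
		}
		all = append(all, models...)
	}
	// Gemini has no list endpoint on Vertex AI, so merge in the curated set.
	all = append(all, getKnownGeminiModels()...)
	// Drop image/embedding/legacy models, then order Gemini > Claude > others.
	return sortModels(filterConversationalModels(all))
}
```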
```diff
@@ -9,13 +9,18 @@ import (
 	"github.com/anthropics/anthropic-sdk-go/vertex"
 	"github.com/danielmiessler/fabric/internal/chat"
 	"github.com/danielmiessler/fabric/internal/domain"
+	debuglog "github.com/danielmiessler/fabric/internal/log"
 	"github.com/danielmiessler/fabric/internal/plugins"
+	"github.com/danielmiessler/fabric/internal/plugins/ai/geminicommon"
+	"golang.org/x/oauth2"
 	"golang.org/x/oauth2/google"
+	"google.golang.org/genai"
 )
 
 const (
 	cloudPlatformScope = "https://www.googleapis.com/auth/cloud-platform"
 	defaultRegion      = "global"
-	maxTokens          = 4096
+	defaultMaxTokens   = 4096
 )
 
 // NewClient creates a new Vertex AI client for accessing Claude models via Google Cloud
@@ -59,17 +64,78 @@ func (c *Client) configure() error {
 }
 
 func (c *Client) ListModels() ([]string, error) {
-	// Return Claude models available on Vertex AI
-	return []string{
-		string(anthropic.ModelClaudeSonnet4_5),
-		string(anthropic.ModelClaudeOpus4_5),
-		string(anthropic.ModelClaudeHaiku4_5),
-		string(anthropic.ModelClaude3_7SonnetLatest),
-		string(anthropic.ModelClaude3_5HaikuLatest),
-	}, nil
+	ctx := context.Background()
+
+	// Get ADC credentials for API authentication
+	creds, err := google.FindDefaultCredentials(ctx, cloudPlatformScope)
+	if err != nil {
+		return nil, fmt.Errorf("failed to get Google credentials (ensure ADC is configured): %w", err)
+	}
+	httpClient := oauth2.NewClient(ctx, creds.TokenSource)
+
+	// Query all publishers in parallel for better performance
+	type result struct {
+		models    []string
+		err       error
+		publisher string
+	}
+	// +1 for known Gemini models (no API to list them)
+	results := make(chan result, len(publishers)+1)
+
+	// Query Model Garden API for third-party models
+	for _, pub := range publishers {
+		go func(publisher string) {
+			models, err := listPublisherModels(ctx, httpClient, c.Region.Value, c.ProjectID.Value, publisher)
+			results <- result{models: models, err: err, publisher: publisher}
+		}(pub)
+	}
+
+	// Add known Gemini models (Vertex AI doesn't have a list API for Gemini)
+	go func() {
+		results <- result{models: getKnownGeminiModels(), err: nil, publisher: "gemini"}
+	}()
+
+	// Collect results from all sources
+	var allModels []string
+	for range len(publishers) + 1 {
+		r := <-results
+		if r.err != nil {
+			// Log warning but continue - some sources may not be available
+			debuglog.Debug(debuglog.Basic, "Failed to list %s models: %v\n", r.publisher, r.err)
+			continue
+		}
+		allModels = append(allModels, r.models...)
+	}
+
+	if len(allModels) == 0 {
+		return nil, fmt.Errorf("no models found from any publisher")
+	}
+
+	// Filter to only conversational models and sort
+	filtered := filterConversationalModels(allModels)
+	if len(filtered) == 0 {
+		return nil, fmt.Errorf("no conversational models found")
+	}
+
+	return sortModels(filtered), nil
+}
 
 func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (string, error) {
+	if isGeminiModel(opts.Model) {
+		return c.sendGemini(ctx, msgs, opts)
+	}
+	return c.sendClaude(ctx, msgs, opts)
+}
+
+// getMaxTokens returns the max output tokens to use for a request
+func getMaxTokens(opts *domain.ChatOptions) int64 {
+	if opts.MaxTokens > 0 {
+		return int64(opts.MaxTokens)
+	}
+	return int64(defaultMaxTokens)
+}
+
+func (c *Client) sendClaude(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (string, error) {
 	if c.client == nil {
 		return "", fmt.Errorf("VertexAI client not initialized")
 	}
@@ -80,14 +146,22 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
 		return "", fmt.Errorf("no valid messages to send")
 	}
 
-	// Create the request
-	response, err := c.client.Messages.New(ctx, anthropic.MessageNewParams{
-		Model:       anthropic.Model(opts.Model),
-		MaxTokens:   int64(maxTokens),
-		Messages:    anthropicMessages,
-		Temperature: anthropic.Opt(opts.Temperature),
-	})
+	// Build request params
+	params := anthropic.MessageNewParams{
+		Model:     anthropic.Model(opts.Model),
+		MaxTokens: getMaxTokens(opts),
+		Messages:  anthropicMessages,
+	}
+
+	// Only set one of Temperature or TopP as some models don't allow both
+	// (following anthropic.go pattern)
+	if opts.TopP != domain.DefaultTopP {
+		params.TopP = anthropic.Opt(opts.TopP)
+	} else {
+		params.Temperature = anthropic.Opt(opts.Temperature)
+	}
+
+	response, err := c.client.Messages.New(ctx, params)
 	if err != nil {
 		return "", err
 	}
@@ -107,7 +181,14 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
 	return strings.Join(textParts, ""), nil
 }
 
-func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) error {
+func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) error {
+	if isGeminiModel(opts.Model) {
+		return c.sendStreamGemini(msgs, opts, channel)
+	}
+	return c.sendStreamClaude(msgs, opts, channel)
+}
+
+func (c *Client) sendStreamClaude(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) error {
 	if c.client == nil {
 		close(channel)
 		return fmt.Errorf("VertexAI client not initialized")
@@ -122,25 +203,198 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
 		return fmt.Errorf("no valid messages to send")
 	}
 
+	// Build request params
+	params := anthropic.MessageNewParams{
+		Model:     anthropic.Model(opts.Model),
+		MaxTokens: getMaxTokens(opts),
+		Messages:  anthropicMessages,
+	}
+
+	// Only set one of Temperature or TopP as some models don't allow both
+	if opts.TopP != domain.DefaultTopP {
+		params.TopP = anthropic.Opt(opts.TopP)
+	} else {
+		params.Temperature = anthropic.Opt(opts.Temperature)
+	}
+
 	// Create streaming request
-	stream := c.client.Messages.NewStreaming(ctx, anthropic.MessageNewParams{
-		Model:       anthropic.Model(opts.Model),
-		MaxTokens:   int64(maxTokens),
-		Messages:    anthropicMessages,
-		Temperature: anthropic.Opt(opts.Temperature),
-	})
+	stream := c.client.Messages.NewStreaming(ctx, params)
 
 	// Process stream
 	for stream.Next() {
 		event := stream.Current()
 
+		// Handle Content
 		if event.Delta.Text != "" {
-			channel <- event.Delta.Text
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: event.Delta.Text,
+			}
 		}
+
+		// Handle Usage
+		if event.Message.Usage.InputTokens != 0 || event.Message.Usage.OutputTokens != 0 {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  int(event.Message.Usage.InputTokens),
+					OutputTokens: int(event.Message.Usage.OutputTokens),
+					TotalTokens:  int(event.Message.Usage.InputTokens + event.Message.Usage.OutputTokens),
+				},
+			}
+		} else if event.Usage.InputTokens != 0 || event.Usage.OutputTokens != 0 {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  int(event.Usage.InputTokens),
+					OutputTokens: int(event.Usage.OutputTokens),
+					TotalTokens:  int(event.Usage.InputTokens + event.Usage.OutputTokens),
+				},
+			}
+		}
 	}
 
 	return stream.Err()
 }
+
+// Gemini methods using genai SDK with Vertex AI backend
+
+// getGeminiRegion returns the appropriate region for a Gemini model.
+// Preview models are often only available on the global endpoint.
+func (c *Client) getGeminiRegion(model string) string {
+	if strings.Contains(strings.ToLower(model), "preview") {
+		return "global"
+	}
+	return c.Region.Value
+}
+
+func (c *Client) sendGemini(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (string, error) {
+	client, err := genai.NewClient(ctx, &genai.ClientConfig{
+		Project:  c.ProjectID.Value,
+		Location: c.getGeminiRegion(opts.Model),
+		Backend:  genai.BackendVertexAI,
+	})
+	if err != nil {
+		return "", fmt.Errorf("failed to create Gemini client: %w", err)
+	}
+
+	contents := geminicommon.ConvertMessages(msgs)
+	if len(contents) == 0 {
+		return "", fmt.Errorf("no valid messages to send")
+	}
+
+	config := c.buildGeminiConfig(opts)
+
+	response, err := client.Models.GenerateContent(ctx, opts.Model, contents, config)
+	if err != nil {
+		return "", err
+	}
+
+	return geminicommon.ExtractTextWithCitations(response), nil
+}
+
+// buildGeminiConfig creates the generation config for Gemini models,
+// following the gemini.go pattern for feature parity.
+func (c *Client) buildGeminiConfig(opts *domain.ChatOptions) *genai.GenerateContentConfig {
+	temperature := float32(opts.Temperature)
+	topP := float32(opts.TopP)
+	config := &genai.GenerateContentConfig{
+		Temperature:     &temperature,
+		TopP:            &topP,
+		MaxOutputTokens: int32(getMaxTokens(opts)),
+	}
+
+	// Add web search support
+	if opts.Search {
+		config.Tools = []*genai.Tool{{GoogleSearch: &genai.GoogleSearch{}}}
+	}
+
+	// Add thinking support
+	if tc := parseGeminiThinking(opts.Thinking); tc != nil {
+		config.ThinkingConfig = tc
+	}
+
+	return config
+}
+
+// parseGeminiThinking converts thinking level to Gemini thinking config
+func parseGeminiThinking(level domain.ThinkingLevel) *genai.ThinkingConfig {
+	lower := strings.ToLower(strings.TrimSpace(string(level)))
+	switch domain.ThinkingLevel(lower) {
+	case "", domain.ThinkingOff:
+		return nil
+	case domain.ThinkingLow, domain.ThinkingMedium, domain.ThinkingHigh:
+		if budget, ok := domain.ThinkingBudgets[domain.ThinkingLevel(lower)]; ok {
+			b := int32(budget)
+			return &genai.ThinkingConfig{IncludeThoughts: true, ThinkingBudget: &b}
+		}
+	default:
+		// Try parsing as integer token count
+		var tokens int
+		if _, err := fmt.Sscanf(lower, "%d", &tokens); err == nil && tokens > 0 {
+			t := int32(tokens)
+			return &genai.ThinkingConfig{IncludeThoughts: true, ThinkingBudget: &t}
+		}
+	}
+	return nil
+}
+
+func (c *Client) sendStreamGemini(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan domain.StreamUpdate) error {
+	defer close(channel)
+	ctx := context.Background()
+
+	client, err := genai.NewClient(ctx, &genai.ClientConfig{
+		Project:  c.ProjectID.Value,
+		Location: c.getGeminiRegion(opts.Model),
+		Backend:  genai.BackendVertexAI,
+	})
+	if err != nil {
+		return fmt.Errorf("failed to create Gemini client: %w", err)
+	}
+
+	contents := geminicommon.ConvertMessages(msgs)
+	if len(contents) == 0 {
+		return fmt.Errorf("no valid messages to send")
+	}
+
+	config := c.buildGeminiConfig(opts)
+
+	stream := client.Models.GenerateContentStream(ctx, opts.Model, contents, config)
+
+	for response, err := range stream {
+		if err != nil {
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeError,
+				Content: fmt.Sprintf("Error: %v", err),
+			}
+			return err
+		}
+
+		text := geminicommon.ExtractText(response)
+		if text != "" {
+			channel <- domain.StreamUpdate{
+				Type:    domain.StreamTypeContent,
+				Content: text,
+			}
+		}
+
+		if response.UsageMetadata != nil {
+			channel <- domain.StreamUpdate{
+				Type: domain.StreamTypeUsage,
+				Usage: &domain.UsageMetadata{
+					InputTokens:  int(response.UsageMetadata.PromptTokenCount),
+					OutputTokens: int(response.UsageMetadata.CandidatesTokenCount),
+					TotalTokens:  int(response.UsageMetadata.TotalTokenCount),
+				},
+			}
+		}
+	}
+
+	return nil
+}
+
+// Claude message conversion
 
 func (c *Client) toMessages(msgs []*chat.ChatCompletionMessage) []anthropic.MessageParam {
 	// Convert messages to Anthropic format with proper role handling
 	// - System messages become part of the first user message
```
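As a worked example of the thinking parser's outcomes, here is a small in-package sketch (the hypothetical `demoParseGeminiThinking` exists only for illustration; named-level budgets come from whatever `domain.ThinkingBudgets` defines):

```go
func demoParseGeminiThinking() {
	fmt.Println(parseGeminiThinking(domain.ThinkingOff) == nil) // true: thinking disabled
	fmt.Println(parseGeminiThinking("invalid") == nil)          // true: not a level, not a number

	if tc := parseGeminiThinking("5000"); tc != nil {
		fmt.Println(*tc.ThinkingBudget, tc.IncludeThoughts) // 5000 true
	}
	if tc := parseGeminiThinking(domain.ThinkingHigh); tc != nil {
		// Budget is looked up in the domain.ThinkingBudgets table.
		fmt.Println(*tc.ThinkingBudget > 0) // true
	}
}
```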
internal/plugins/ai/vertexai/vertexai_test.go (new file, 442 lines)

```go
package vertexai

import (
	"encoding/json"
	"net/http"
	"net/http/httptest"
	"testing"

	"github.com/danielmiessler/fabric/internal/domain"
	"github.com/stretchr/testify/assert"
	"github.com/stretchr/testify/require"
)

func TestExtractModelName(t *testing.T) {
	tests := []struct {
		name     string
		input    string
		expected string
	}{
		{
			name:     "standard format",
			input:    "publishers/google/models/gemini-2.0-flash",
			expected: "gemini-2.0-flash",
		},
		{
			name:     "anthropic model",
			input:    "publishers/anthropic/models/claude-sonnet-4-5",
			expected: "claude-sonnet-4-5",
		},
		{
			name:     "model with version",
			input:    "publishers/anthropic/models/claude-3-opus@20240229",
			expected: "claude-3-opus@20240229",
		},
		{
			name:     "just model name",
			input:    "gemini-pro",
			expected: "gemini-pro",
		},
		{
			name:     "empty string",
			input:    "",
			expected: "",
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := extractModelName(tt.input)
			assert.Equal(t, tt.expected, result)
		})
	}
}

func TestSortModels(t *testing.T) {
	input := []string{
		"claude-sonnet-4-5",
		"gemini-2.0-flash",
		"gemini-pro",
		"claude-opus-4",
		"unknown-model",
	}

	result := sortModels(input)

	// Verify order: Gemini first, then Claude, then others (alphabetically within each group)
	expected := []string{
		"gemini-2.0-flash",
		"gemini-pro",
		"claude-opus-4",
		"claude-sonnet-4-5",
		"unknown-model",
	}

	assert.Equal(t, expected, result)
}

func TestModelPriority(t *testing.T) {
	tests := []struct {
		model    string
		priority int
	}{
		{"gemini-2.0-flash", 1},
		{"Gemini-Pro", 1},
		{"claude-sonnet-4-5", 2},
		{"CLAUDE-OPUS", 2},
		{"some-other-model", 3},
	}

	for _, tt := range tests {
		t.Run(tt.model, func(t *testing.T) {
			result := modelPriority(tt.model)
			assert.Equal(t, tt.priority, result, "priority for %s", tt.model)
		})
	}
}

func TestListPublisherModels_Success(t *testing.T) {
	// Create mock server
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		assert.Equal(t, http.MethodGet, r.Method)
		assert.Contains(t, r.URL.Path, "/v1/publishers/google/models")

		response := publisherModelsResponse{
			PublisherModels: []publisherModel{
				{Name: "publishers/google/models/gemini-2.0-flash"},
				{Name: "publishers/google/models/gemini-pro"},
			},
		}
		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(response)
	}))
	defer server.Close()

	// Note: This test would need to mock the actual API endpoint
	// For now, we just verify the mock server works
	resp, err := http.Get(server.URL + "/v1/publishers/google/models")
	require.NoError(t, err)
	defer resp.Body.Close()

	var response publisherModelsResponse
	err = json.NewDecoder(resp.Body).Decode(&response)
	require.NoError(t, err)

	assert.Len(t, response.PublisherModels, 2)
	assert.Equal(t, "publishers/google/models/gemini-2.0-flash", response.PublisherModels[0].Name)
}

func TestListPublisherModels_Pagination(t *testing.T) {
	callCount := 0

	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		callCount++

		var response publisherModelsResponse
		if callCount == 1 {
			response = publisherModelsResponse{
				PublisherModels: []publisherModel{
					{Name: "publishers/google/models/gemini-flash"},
				},
				NextPageToken: "page2",
			}
		} else {
			response = publisherModelsResponse{
				PublisherModels: []publisherModel{
					{Name: "publishers/google/models/gemini-pro"},
				},
				NextPageToken: "",
			}
		}

		w.Header().Set("Content-Type", "application/json")
		json.NewEncoder(w).Encode(response)
	}))
	defer server.Close()

	// Verify the server handles pagination correctly
	resp, err := http.Get(server.URL + "/page1")
	require.NoError(t, err)
	resp.Body.Close()

	resp, err = http.Get(server.URL + "/page2")
	require.NoError(t, err)
	resp.Body.Close()

	assert.Equal(t, 2, callCount)
}

func TestListPublisherModels_ErrorResponse(t *testing.T) {
	server := httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		w.WriteHeader(http.StatusForbidden)
		w.Write([]byte(`{"error": "access denied"}`))
	}))
	defer server.Close()

	resp, err := http.Get(server.URL + "/v1/publishers/google/models")
	require.NoError(t, err)
	defer resp.Body.Close()

	assert.Equal(t, http.StatusForbidden, resp.StatusCode)
}

func TestNewClient(t *testing.T) {
	client := NewClient()

	assert.NotNil(t, client)
	assert.Equal(t, "VertexAI", client.Name)
	assert.NotNil(t, client.ProjectID)
	assert.NotNil(t, client.Region)
	assert.Equal(t, "global", client.Region.Value)
}

func TestPublishersListComplete(t *testing.T) {
	// Verify supported publishers are in the list
	expectedPublishers := []string{"google", "anthropic"}

	assert.Equal(t, expectedPublishers, publishers)
}

func TestIsConversationalModel(t *testing.T) {
	tests := []struct {
		model    string
		expected bool
	}{
		// Conversational models (should return true)
		{"gemini-2.0-flash", true},
		{"gemini-2.5-pro", true},
		{"claude-sonnet-4-5", true},
		{"claude-opus-4", true},
		{"deepseek-v3", true},
		{"llama-3.1-405b", true},
		{"mistral-large", true},

		// Non-conversational models (should return false)
		{"imagen-3.0-capability-002", false},
		{"imagen-4.0-fast-generate-001", false},
		{"imagegeneration", false},
		{"imagetext", false},
		{"image-segmentation-001", false},
		{"textembedding-gecko", false},
		{"multimodalembedding", false},
		{"text-embedding-004", false},
		{"text-bison", false},
		{"text-unicorn", false},
		{"code-bison", false},
		{"code-gecko", false},
		{"codechat-bison", false},
		{"chat-bison", false},
		{"veo-001", false},
		{"chirp", false},
		{"medlm-medium", false},
	}

	for _, tt := range tests {
		t.Run(tt.model, func(t *testing.T) {
			result := isConversationalModel(tt.model)
			assert.Equal(t, tt.expected, result, "isConversationalModel(%s)", tt.model)
		})
	}
}

func TestFilterConversationalModels(t *testing.T) {
	input := []string{
		"gemini-2.0-flash",
		"imagen-3.0-capability-002",
		"claude-sonnet-4-5",
		"textembedding-gecko",
		"deepseek-v3",
		"chat-bison",
		"llama-3.1-405b",
		"code-bison",
	}

	result := filterConversationalModels(input)

	expected := []string{
		"gemini-2.0-flash",
		"claude-sonnet-4-5",
		"deepseek-v3",
		"llama-3.1-405b",
	}

	assert.Equal(t, expected, result)
}

func TestFilterConversationalModels_EmptyInput(t *testing.T) {
	result := filterConversationalModels([]string{})
	assert.Empty(t, result)
}

func TestFilterConversationalModels_AllFiltered(t *testing.T) {
	input := []string{
		"imagen-3.0",
		"textembedding-gecko",
		"chat-bison",
	}

	result := filterConversationalModels(input)
	assert.Empty(t, result)
}

func TestIsGeminiModel(t *testing.T) {
	tests := []struct {
		model    string
		expected bool
	}{
		{"gemini-2.5-pro", true},
		{"gemini-3-pro-preview", true},
		{"Gemini-2.0-flash", true},
		{"GEMINI-flash", true},
		{"claude-sonnet-4-5", false},
		{"claude-opus-4", false},
		{"deepseek-v3", false},
		{"llama-3.1-405b", false},
		{"", false},
	}

	for _, tt := range tests {
		t.Run(tt.model, func(t *testing.T) {
			result := isGeminiModel(tt.model)
			assert.Equal(t, tt.expected, result, "isGeminiModel(%s)", tt.model)
		})
	}
}

func TestGetMaxTokens(t *testing.T) {
	tests := []struct {
		name     string
		opts     *domain.ChatOptions
		expected int64
	}{
		{
			name:     "MaxTokens specified",
			opts:     &domain.ChatOptions{MaxTokens: 8192},
			expected: 8192,
		},
		{
			name:     "Default when MaxTokens is 0",
			opts:     &domain.ChatOptions{MaxTokens: 0},
			expected: int64(defaultMaxTokens),
		},
		{
			name:     "Default when MaxTokens is negative",
			opts:     &domain.ChatOptions{MaxTokens: -1},
			expected: int64(defaultMaxTokens),
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := getMaxTokens(tt.opts)
			assert.Equal(t, tt.expected, result)
		})
	}
}

func TestParseGeminiThinking(t *testing.T) {
	tests := []struct {
		name           string
		level          domain.ThinkingLevel
		expectNil      bool
		expectedBudget int32
	}{
		{
			name:      "empty string returns nil",
			level:     "",
			expectNil: true,
		},
		{
			name:      "off returns nil",
			level:     domain.ThinkingOff,
			expectNil: true,
		},
		{
			name:           "low thinking",
			level:          domain.ThinkingLow,
			expectNil:      false,
			expectedBudget: int32(domain.ThinkingBudgets[domain.ThinkingLow]),
		},
		{
			name:           "medium thinking",
			level:          domain.ThinkingMedium,
			expectNil:      false,
			expectedBudget: int32(domain.ThinkingBudgets[domain.ThinkingMedium]),
		},
		{
			name:           "high thinking",
			level:          domain.ThinkingHigh,
			expectNil:      false,
			expectedBudget: int32(domain.ThinkingBudgets[domain.ThinkingHigh]),
		},
		{
			name:           "numeric string",
			level:          "5000",
			expectNil:      false,
			expectedBudget: 5000,
		},
		{
			name:      "invalid string returns nil",
			level:     "invalid",
			expectNil: true,
		},
	}

	for _, tt := range tests {
		t.Run(tt.name, func(t *testing.T) {
			result := parseGeminiThinking(tt.level)
			if tt.expectNil {
				assert.Nil(t, result)
			} else {
				require.NotNil(t, result)
				assert.True(t, result.IncludeThoughts)
				assert.Equal(t, tt.expectedBudget, *result.ThinkingBudget)
			}
		})
	}
}

func TestBuildGeminiConfig(t *testing.T) {
	client := &Client{}

	t.Run("basic config with temperature and TopP", func(t *testing.T) {
		opts := &domain.ChatOptions{
			Temperature: 0.7,
			TopP:        0.9,
			MaxTokens:   8192,
		}
		config := client.buildGeminiConfig(opts)

		assert.NotNil(t, config)
		assert.Equal(t, float32(0.7), *config.Temperature)
		assert.Equal(t, float32(0.9), *config.TopP)
		assert.Equal(t, int32(8192), config.MaxOutputTokens)
		assert.Nil(t, config.Tools)
		assert.Nil(t, config.ThinkingConfig)
	})

	t.Run("config with search enabled", func(t *testing.T) {
		opts := &domain.ChatOptions{
			Temperature: 0.5,
			TopP:        0.8,
			Search:      true,
		}
		config := client.buildGeminiConfig(opts)

		assert.NotNil(t, config.Tools)
		assert.Len(t, config.Tools, 1)
		assert.NotNil(t, config.Tools[0].GoogleSearch)
	})

	t.Run("config with thinking enabled", func(t *testing.T) {
		opts := &domain.ChatOptions{
			Temperature: 0.5,
			TopP:        0.8,
			Thinking:    domain.ThinkingHigh,
		}
		config := client.buildGeminiConfig(opts)

		assert.NotNil(t, config.ThinkingConfig)
		assert.True(t, config.ThinkingConfig.IncludeThoughts)
	})
}
```
```diff
@@ -40,9 +40,10 @@ type ChatRequest struct {
 }
 
 type StreamResponse struct {
-	Type    string `json:"type"`    // "content", "error", "complete"
-	Format  string `json:"format"`  // "markdown", "mermaid", "plain"
-	Content string `json:"content"` // The actual content
+	Type    string                `json:"type"`             // "content", "usage", "error", "complete"
+	Format  string                `json:"format,omitempty"` // "markdown", "mermaid", "plain"
+	Content string                `json:"content,omitempty"`
+	Usage   *domain.UsageMetadata `json:"usage,omitempty"`
 }
 
 func NewChatHandler(r *gin.Engine, registry *core.PluginRegistry, db *fsdb.Db) *ChatHandler {
@@ -98,7 +99,7 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
 		log.Printf("Processing prompt %d: Model=%s Pattern=%s Context=%s",
 			i+1, prompt.Model, prompt.PatternName, prompt.ContextName)
 
-		streamChan := make(chan string)
+		streamChan := make(chan domain.StreamUpdate)
 
 		go func(p PromptRequest) {
 			defer close(streamChan)
@@ -117,10 +118,10 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
 				}
 			}
 
-			chatter, err := h.registry.GetChatter(p.Model, 2048, p.Vendor, "", false, false)
+			chatter, err := h.registry.GetChatter(p.Model, 2048, p.Vendor, "", true, false)
 			if err != nil {
 				log.Printf("Error creating chatter: %v", err)
-				streamChan <- fmt.Sprintf("Error: %v", err)
+				streamChan <- domain.StreamUpdate{Type: domain.StreamTypeError, Content: fmt.Sprintf("Error: %v", err)}
 				return
 			}
 
@@ -144,49 +145,46 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
 				FrequencyPenalty: request.FrequencyPenalty,
 				PresencePenalty:  request.PresencePenalty,
 				Thinking:         request.Thinking,
 				Search:           request.Search,
 				SearchLocation:   request.SearchLocation,
+				UpdateChan:       streamChan,
+				Quiet:            true,
 			}
 
-			session, err := chatter.Send(chatReq, opts)
+			_, err = chatter.Send(chatReq, opts)
 			if err != nil {
 				log.Printf("Error from chatter.Send: %v", err)
-				streamChan <- fmt.Sprintf("Error: %v", err)
+				// Error already sent to streamChan via domain.StreamTypeError if occurred in Send loop
 				return
 			}
-
-			if session == nil {
-				log.Printf("No session returned from chatter.Send")
-				streamChan <- "Error: No response from model"
-				return
-			}
-
-			lastMsg := session.GetLastMessage()
-			if lastMsg != nil {
-				streamChan <- lastMsg.Content
-			} else {
-				log.Printf("No message content in session")
-				streamChan <- "Error: No response content"
-			}
 		}(prompt)
 
-		for content := range streamChan {
+		for update := range streamChan {
 			select {
 			case <-clientGone:
 				return
 			default:
 				var response StreamResponse
-				if strings.HasPrefix(content, "Error:") {
+				switch update.Type {
+				case domain.StreamTypeContent:
+					response = StreamResponse{
+						Type:    "content",
+						Format:  detectFormat(update.Content),
+						Content: update.Content,
+					}
+				case domain.StreamTypeUsage:
+					response = StreamResponse{
+						Type:  "usage",
+						Usage: update.Usage,
+					}
+				case domain.StreamTypeError:
 					response = StreamResponse{
 						Type:    "error",
 						Format:  "plain",
-						Content: content,
+						Content: update.Content,
 					}
-				} else {
-					response = StreamResponse{
-						Type:    "content",
-						Format:  detectFormat(content),
-						Content: content,
-					}
 				}
 
 				if err := writeSSEResponse(c.Writer, response); err != nil {
 					log.Printf("Error writing response: %v", err)
 					return
```
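For reference, a sketch of what the new frames serialize to with these struct tags. This is an illustrative marshaling with a locally mirrored struct, not captured server output, and the field casing inside `usage` depends on whatever JSON tags `domain.UsageMetadata` itself carries.

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/danielmiessler/fabric/internal/domain"
)

// StreamResponse mirrors the handler's struct above for illustration.
type StreamResponse struct {
	Type    string                `json:"type"`
	Format  string                `json:"format,omitempty"`
	Content string                `json:"content,omitempty"`
	Usage   *domain.UsageMetadata `json:"usage,omitempty"`
}

func main() {
	frames := []StreamResponse{
		{Type: "content", Format: "markdown", Content: "Hello"},
		{Type: "usage", Usage: &domain.UsageMetadata{InputTokens: 12, OutputTokens: 34, TotalTokens: 46}},
	}
	for _, f := range frames {
		b, _ := json.Marshal(f)
		// omitempty keeps content frames free of a null usage field and vice versa.
		fmt.Printf("data: %s\n\n", b)
	}
}
```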
```diff
@@ -1 +1 @@
-"1.4.366"
+"1.4.375"
```