mirror of
https://github.com/danielmiessler/Fabric.git
synced 2026-01-09 22:38:10 -05:00
Compare commits
28 Commits
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
da94411bf3 | ||
|
|
ab7b37be10 | ||
|
|
772337bf0d | ||
|
|
1e30c4e136 | ||
|
|
e12a40ad4f | ||
|
|
97beaecbeb | ||
|
|
7af6817bac | ||
|
|
50ecc32d85 | ||
|
|
ff1ef380a7 | ||
|
|
6a3a7e82d1 | ||
|
|
34bc0b5e31 | ||
|
|
ce59999503 | ||
|
|
9bb4ccf740 | ||
|
|
900b13f08c | ||
|
|
6824f0c0a7 | ||
|
|
a2481406db | ||
|
|
171f7eb3ab | ||
|
|
dccc70c433 | ||
|
|
e5ec9acfac | ||
|
|
f0eb9f90a3 | ||
|
|
758425f98a | ||
|
|
b4b5b0a4d9 | ||
|
|
81a47ecab7 | ||
|
|
0bce5c7b6e | ||
|
|
992936dbd8 | ||
|
|
48d74290f3 | ||
|
|
3d4e967b92 | ||
|
|
d8690c7cec |
6
.vscode/settings.json
vendored
6
.vscode/settings.json
vendored
@@ -75,6 +75,7 @@
|
||||
"jessevdk",
|
||||
"Jina",
|
||||
"joho",
|
||||
"kballard",
|
||||
"Keploy",
|
||||
"Kore",
|
||||
"ksylvan",
|
||||
@@ -98,6 +99,7 @@
|
||||
"mbed",
|
||||
"metacharacters",
|
||||
"Miessler",
|
||||
"mpga",
|
||||
"nometa",
|
||||
"numpy",
|
||||
"ollama",
|
||||
@@ -129,6 +131,7 @@
|
||||
"seaborn",
|
||||
"semgrep",
|
||||
"sess",
|
||||
"shellquote",
|
||||
"storer",
|
||||
"Streamlit",
|
||||
"stretchr",
|
||||
@@ -156,7 +159,8 @@
|
||||
"writeups",
|
||||
"xclip",
|
||||
"yourpatternname",
|
||||
"youtu"
|
||||
"youtu",
|
||||
"YTDLP"
|
||||
],
|
||||
"cSpell.ignorePaths": ["go.mod", ".gitignore", "CHANGELOG.md"],
|
||||
"markdownlint.config": {
|
||||
|
||||
93
CHANGELOG.md
93
CHANGELOG.md
@@ -1,5 +1,98 @@
|
||||
# Changelog
|
||||
|
||||
## v1.4.295 (2025-08-24)
|
||||
|
||||
### PR [#1727](https://github.com/danielmiessler/Fabric/pull/1727) by [ksylvan](https://github.com/ksylvan): Standardize Anthropic Beta Failure Logging
|
||||
|
||||
- Refactor: route Anthropic beta failure logs through internal debug logger
|
||||
- Replace fmt.Fprintf stderr with debuglog.Debug for beta failures
|
||||
- Import internal log package and remove os dependency
|
||||
- Standardize logging level to debuglog.Basic for beta errors
|
||||
- Preserve fallback stream behavior when beta features fail
|
||||
|
||||
## v1.4.294 (2025-08-20)
|
||||
|
||||
### PR [#1723](https://github.com/danielmiessler/Fabric/pull/1723) by [ksylvan](https://github.com/ksylvan): docs: update README with Venice AI provider and Windows install script
|
||||
|
||||
- Add Venice AI provider configuration with API endpoint
|
||||
- Document Venice AI as privacy-first open-source provider
|
||||
- Include PowerShell installation script for Windows users
|
||||
- Add debug levels section to table of contents
|
||||
- Update recent major features with v1.4.294 release notes
|
||||
|
||||
## v1.4.293 (2025-08-19)
|
||||
|
||||
### PR [#1718](https://github.com/danielmiessler/Fabric/pull/1718) by [ksylvan](https://github.com/ksylvan): Implement Configurable Debug Logging Levels
|
||||
|
||||
- Add --debug flag controlling runtime logging verbosity levels
|
||||
- Introduce internal/log package with Off, Basic, Detailed, Trace
|
||||
- Replace ad-hoc Debugf and globals with centralized debug logger
|
||||
- Wire debug level during early CLI argument parsing
|
||||
- Add bash, zsh, fish completions for --debug levels
|
||||
|
||||
## v1.4.292 (2025-08-18)
|
||||
|
||||
### PR [#1717](https://github.com/danielmiessler/Fabric/pull/1717) by [ksylvan](https://github.com/ksylvan): Highlight default vendor/model in model listing
|
||||
|
||||
- Update PrintWithVendor signature to accept default vendor and model
|
||||
- Mark default vendor/model with asterisk in non-shell output
|
||||
- Compare vendor and model case-insensitively when marking
|
||||
- Pass registry defaults to PrintWithVendor from CLI
|
||||
- Add test ensuring default selection appears with asterisk
|
||||
### Direct commits
|
||||
|
||||
- Docs: update version number in README updates section from v1.4.290 to v1.4.291
|
||||
|
||||
## v1.4.291 (2025-08-18)
|
||||
|
||||
### PR [#1715](https://github.com/danielmiessler/Fabric/pull/1715) by [ksylvan](https://github.com/ksylvan): feat: add speech-to-text via OpenAI with transcription flags and comp…
|
||||
|
||||
- Add --transcribe-file flag to transcribe audio or video
|
||||
- Add --transcribe-model flag with model listing and completion
|
||||
- Add --split-media-file flag to chunk files over 25MB
|
||||
- Implement OpenAI transcription using Whisper and GPT-4o Transcribe
|
||||
- Integrate transcription pipeline into CLI before readability processing
|
||||
|
||||
## v1.4.290 (2025-08-17)
|
||||
|
||||
### PR [#1714](https://github.com/danielmiessler/Fabric/pull/1714) by [ksylvan](https://github.com/ksylvan): feat: add per-pattern model mapping support via environment variables
|
||||
|
||||
- Add per-pattern model mapping support via environment variables
|
||||
- Implement environment variable lookup for pattern-specific models
|
||||
- Support vendor|model format in environment variable specification
|
||||
- Enable shell startup file configuration for patterns
|
||||
- Transform pattern names to uppercase environment variable format
|
||||
|
||||
## v1.4.289 (2025-08-16)
|
||||
|
||||
### PR [#1710](https://github.com/danielmiessler/Fabric/pull/1710) by [ksylvan](https://github.com/ksylvan): feat: add --no-variable-replacement flag to disable pattern variable …
|
||||
|
||||
- Add --no-variable-replacement flag to disable pattern variable substitution
|
||||
- Introduce CLI flag to skip pattern variable replacement and wire it into domain request and session builder
|
||||
- Provide PatternsEntity.GetWithoutVariables for input-only pattern processing support
|
||||
- Refactor patterns code into reusable load and apply helpers
|
||||
- Update bash, zsh, fish completions with new flag and document in README and CLI help output
|
||||
|
||||
## v1.4.288 (2025-08-16)
|
||||
|
||||
### PR [#1709](https://github.com/danielmiessler/Fabric/pull/1709) by [ksylvan](https://github.com/ksylvan): Enhanced YouTube Subtitle Language Fallback Handling
|
||||
|
||||
- Fix: improve YouTube subtitle language fallback handling in yt-dlp integration
|
||||
- Fix typo "Gemmini" to "Gemini" in README
|
||||
- Add "kballard" and "shellquote" to VSCode dictionary
|
||||
- Add "YTDLP" to VSCode spell checker
|
||||
- Enhance subtitle language options with fallback variants
|
||||
|
||||
## v1.4.287 (2025-08-14)
|
||||
|
||||
### PR [#1706](https://github.com/danielmiessler/Fabric/pull/1706) by [ksylvan](https://github.com/ksylvan): Gemini Thinking Support and README (New Features) automation
|
||||
|
||||
- Add comprehensive "Recent Major Features" section to README
|
||||
- Introduce new readme_updates Python script for automation
|
||||
- Enable Gemini thinking configuration with token budgets
|
||||
- Update CLI help text for Gemini thinking support
|
||||
- Add comprehensive test coverage for Gemini thinking
|
||||
|
||||
## v1.4.286 (2025-08-14)
|
||||
|
||||
### PR [#1700](https://github.com/danielmiessler/Fabric/pull/1700) by [ksylvan](https://github.com/ksylvan): Introduce Thinking Config Across Anthropic and OpenAI Providers
|
||||
|
||||
92
README.md
92
README.md
@@ -47,6 +47,54 @@ It's all really exciting and powerful, but _it's not easy to integrate this func
|
||||
|
||||
Fabric organizes prompts by real-world task, allowing people to create, collect, and organize their most important AI solutions in a single place for use in their favorite tools. And if you're command-line focused, you can use Fabric itself as the interface!
|
||||
|
||||
## Updates
|
||||
|
||||
Dear Users,
|
||||
|
||||
We've been doing so many exciting things here at Fabric, I wanted to give a quick summary here to give you a sense of our development velocity!
|
||||
|
||||
Below are the **new features and capabilities** we've added (newest first):
|
||||
|
||||
### Recent Major Features
|
||||
|
||||
- [v1.4.294](https://github.com/danielmiessler/fabric/releases/tag/v1.4.294) (Aug 20, 2025) — **Venice AI Support**: Added the Venice AI provider. Venice is a Privacy-First, Open-Source AI provider. See their ["About Venice"](https://docs.venice.ai/overview/about-venice) page for details.
|
||||
- [v1.4.291](https://github.com/danielmiessler/fabric/releases/tag/v1.4.291) (Aug 18, 2025) — **Speech To Text**: Add OpenAI speech-to-text support with `--transcribe-file`, `--transcribe-model`, and `--split-media-file` flags.
|
||||
- [v1.4.287](https://github.com/danielmiessler/fabric/releases/tag/v1.4.287) (Aug 16, 2025) — **AI Reasoning**: Add Thinking to Gemini models and introduce `readme_updates` python script
|
||||
- [v1.4.286](https://github.com/danielmiessler/fabric/releases/tag/v1.4.286) (Aug 14, 2025) — **AI Reasoning**: Introduce Thinking Config Across Anthropic and OpenAI Providers
|
||||
- [v1.4.285](https://github.com/danielmiessler/fabric/releases/tag/v1.4.285) (Aug 13, 2025) — **Extended Context**: Enable One Million Token Context Beta Feature for Sonnet-4
|
||||
- [v1.4.284](https://github.com/danielmiessler/fabric/releases/tag/v1.4.284) (Aug 12, 2025) — **Easy Shell Completions Setup**: Introduce One-Liner Curl Install for Completions
|
||||
- [v1.4.283](https://github.com/danielmiessler/fabric/releases/tag/v1.4.283) (Aug 12, 2025) — **Model Management**: Add Vendor Selection Support for Models
|
||||
- [v1.4.282](https://github.com/danielmiessler/fabric/releases/tag/v1.4.282) (Aug 11, 2025) — **Enhanced Shell Completions**: Enhanced Shell Completions for Fabric CLI Binaries
|
||||
- [v1.4.281](https://github.com/danielmiessler/fabric/releases/tag/v1.4.281) (Aug 11, 2025) — **Gemini Search Tool**: Add Web Search Tool Support for Gemini Models
|
||||
- [v1.4.278](https://github.com/danielmiessler/fabric/releases/tag/v1.4.278) (Aug 9, 2025) — **Enhance YouTube Transcripts**: Enhance YouTube Support with Custom yt-dlp Arguments
|
||||
- [v1.4.277](https://github.com/danielmiessler/fabric/releases/tag/v1.4.277) (Aug 8, 2025) — **Desktop Notifications**: Add cross-platform desktop notifications to Fabric CLI
|
||||
- [v1.4.274](https://github.com/danielmiessler/fabric/releases/tag/v1.4.274) (Aug 7, 2025) — **Claude 4.1 Added**: Add Support for Claude Opus 4.1 Model
|
||||
- [v1.4.271](https://github.com/danielmiessler/fabric/releases/tag/v1.4.271) (Jul 28, 2025) — **AI Summarized Release Notes**: Enable AI summary updates for GitHub releases
|
||||
- [v1.4.268](https://github.com/danielmiessler/fabric/releases/tag/v1.4.268) (Jul 26, 2025) — **Gemini TTS Voice Selection**: add Gemini TTS voice selection and listing functionality
|
||||
- [v1.4.267](https://github.com/danielmiessler/fabric/releases/tag/v1.4.267) (Jul 26, 2025) — **Text-to-Speech**: Update Gemini Plugin to New SDK with TTS Support
|
||||
- [v1.4.258](https://github.com/danielmiessler/fabric/releases/tag/v1.4.258) (Jul 17, 2025) — **Onboarding Improved**: Add startup check to initialize config and .env file automatically
|
||||
- [v1.4.257](https://github.com/danielmiessler/fabric/releases/tag/v1.4.257) (Jul 17, 2025) — **OpenAI Routing Control**: Introduce CLI Flag to Disable OpenAI Responses API
|
||||
- [v1.4.252](https://github.com/danielmiessler/fabric/releases/tag/v1.4.252) (Jul 16, 2025) — **Hide Thinking Block**: Optional Hiding of Model Thinking Process with Configurable Tags
|
||||
- [v1.4.246](https://github.com/danielmiessler/fabric/releases/tag/v1.4.246) (Jul 14, 2025) — **Automatic ChangeLog Updates**: Add AI-powered changelog generation with high-performance Go tool and comprehensive caching
|
||||
- [v1.4.245](https://github.com/danielmiessler/fabric/releases/tag/v1.4.245) (Jul 11, 2025) — **Together AI**: Together AI Support with OpenAI Fallback Mechanism Added
|
||||
- [v1.4.232](https://github.com/danielmiessler/fabric/releases/tag/v1.4.232) (Jul 6, 2025) — **Add Custom**: Add Custom Patterns Directory Support
|
||||
- [v1.4.231](https://github.com/danielmiessler/fabric/releases/tag/v1.4.231) (Jul 5, 2025) — **OAuth Auto-Auth**: OAuth Authentication Support for Anthropic (Use your Max Subscription)
|
||||
- [v1.4.230](https://github.com/danielmiessler/fabric/releases/tag/v1.4.230) (Jul 5, 2025) — **Model Management**: Add advanced image generation parameters for OpenAI models with four new CLI flags
|
||||
- [v1.4.227](https://github.com/danielmiessler/fabric/releases/tag/v1.4.227) (Jul 4, 2025) — **Add Image**: Add Image Generation Support to Fabric
|
||||
- [v1.4.226](https://github.com/danielmiessler/fabric/releases/tag/v1.4.226) (Jul 4, 2025) — **Web Search**: OpenAI Plugin Now Supports Web Search Functionality
|
||||
- [v1.4.225](https://github.com/danielmiessler/fabric/releases/tag/v1.4.225) (Jul 4, 2025) — **Web Search**: Runtime Web Search Control via Command-Line `--search` Flag
|
||||
- [v1.4.224](https://github.com/danielmiessler/fabric/releases/tag/v1.4.224) (Jul 1, 2025) — **Add code_review**: Add code_review pattern and updates in Pattern_Descriptions
|
||||
- [v1.4.222](https://github.com/danielmiessler/fabric/releases/tag/v1.4.222) (Jul 1, 2025) — **OpenAI Plugin**: OpenAI Plugin Migrates to New Responses API
|
||||
- [v1.4.218](https://github.com/danielmiessler/fabric/releases/tag/v1.4.218) (Jun 27, 2025) — **Model Management**: Add Support for OpenAI Search and Research Model Variants
|
||||
- [v1.4.217](https://github.com/danielmiessler/fabric/releases/tag/v1.4.217) (Jun 26, 2025) — **New YouTube**: New YouTube Transcript Endpoint Added to REST API
|
||||
- [v1.4.212](https://github.com/danielmiessler/fabric/releases/tag/v1.4.212) (Jun 23, 2025) — **Add Langdock**: Add Langdock AI and enhance generic OpenAI compatible support
|
||||
- [v1.4.211](https://github.com/danielmiessler/fabric/releases/tag/v1.4.211) (Jun 19, 2025) — **REST API**: REST API and Web UI Now Support Dynamic Pattern Variables
|
||||
- [v1.4.210](https://github.com/danielmiessler/fabric/releases/tag/v1.4.210) (Jun 18, 2025) — **Add Citations**: Add Citation Support to Perplexity Response
|
||||
- [v1.4.208](https://github.com/danielmiessler/fabric/releases/tag/v1.4.208) (Jun 17, 2025) — **Add Perplexity**: Add Perplexity AI Provider with Token Limits Support
|
||||
- [v1.4.203](https://github.com/danielmiessler/fabric/releases/tag/v1.4.203) (Jun 14, 2025) — **Add Amazon Bedrock**: Add support for Amazon Bedrock
|
||||
|
||||
These features represent our commitment to making Fabric the most powerful and flexible AI augmentation framework available!
|
||||
|
||||
## Intro videos
|
||||
|
||||
Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.
|
||||
@@ -60,9 +108,11 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
|
||||
- [`fabric`](#fabric)
|
||||
- [What and why](#what-and-why)
|
||||
- [Updates](#updates)
|
||||
- [Recent Major Features](#recent-major-features)
|
||||
- [Intro videos](#intro-videos)
|
||||
- [Navigation](#navigation)
|
||||
- [Updates](#updates)
|
||||
- [Changelog](#changelog)
|
||||
- [Philosophy](#philosophy)
|
||||
- [Breaking problems into components](#breaking-problems-into-components)
|
||||
- [Too many prompts](#too-many-prompts)
|
||||
@@ -79,6 +129,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
- [From Source](#from-source)
|
||||
- [Environment Variables](#environment-variables)
|
||||
- [Setup](#setup)
|
||||
- [Per-Pattern Model Mapping](#per-pattern-model-mapping)
|
||||
- [Add aliases for all patterns](#add-aliases-for-all-patterns)
|
||||
- [Save your files in markdown using aliases](#save-your-files-in-markdown-using-aliases)
|
||||
- [Migration](#migration)
|
||||
@@ -89,6 +140,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
- [Bash Completion](#bash-completion)
|
||||
- [Fish Completion](#fish-completion)
|
||||
- [Usage](#usage)
|
||||
- [Debug Levels](#debug-levels)
|
||||
- [Our approach to prompting](#our-approach-to-prompting)
|
||||
- [Examples](#examples)
|
||||
- [Just use the Patterns](#just-use-the-patterns)
|
||||
@@ -112,7 +164,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
|
||||
<br />
|
||||
|
||||
## Updates
|
||||
## Changelog
|
||||
|
||||
Fabric is evolving rapidly.
|
||||
|
||||
@@ -159,6 +211,17 @@ To install Fabric, you can use the latest release binaries or install it from th
|
||||
|
||||
`https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe`
|
||||
|
||||
Or via PowerShell, just copy and paste and run the following snippet to install the binary into `{HOME}\.local\bin`. Please make sure that directory is included in your `PATH`.
|
||||
|
||||
```powershell
|
||||
$ErrorActionPreference = "Stop"
|
||||
$LATEST="https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe"
|
||||
$DIR="${HOME}\.local\bin"
|
||||
New-Item -Path $DIR -ItemType Directory -Force
|
||||
Invoke-WebRequest -URI "${LATEST}" -outfile "${DIR}\fabric.exe"
|
||||
& "${DIR}\fabric.exe" /version
|
||||
```
|
||||
|
||||
#### macOS (arm64)
|
||||
|
||||
`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-arm64 > fabric && chmod +x fabric && ./fabric --version`
|
||||
@@ -236,6 +299,13 @@ fabric --setup
|
||||
|
||||
If everything works you are good to go.
|
||||
|
||||
### Per-Pattern Model Mapping
|
||||
|
||||
You can configure specific models for individual patterns using environment variables
|
||||
like `FABRIC_MODEL_PATTERN_NAME=vendor|model`
|
||||
|
||||
This makes it easy to maintain these per-pattern model mappings in your shell startup files.
|
||||
|
||||
### Add aliases for all patterns
|
||||
|
||||
In order to add aliases for all your patterns and use them directly as commands ie. `summarize` instead of `fabric --pattern summarize`
|
||||
@@ -543,6 +613,7 @@ Application Options:
|
||||
--printsession= Print session
|
||||
--readability Convert HTML input into a clean, readable view
|
||||
--input-has-vars Apply variables to user input
|
||||
--no-variable-replacement Disable pattern variable replacement
|
||||
--dry-run Show what would be sent to the model without actually sending it
|
||||
--serve Serve the Fabric Rest API
|
||||
--serveOllama Serve the Fabric Rest API with ollama endpoints
|
||||
@@ -576,13 +647,22 @@ Application Options:
|
||||
--notification-command= Custom command to run for notifications (overrides built-in
|
||||
notifications)
|
||||
--yt-dlp-args= Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')
|
||||
--thinking= Set reasoning/thinking level (e.g., off, low, medium,
|
||||
high, or numeric tokens for Anthropic)
|
||||
|
||||
--thinking= Set reasoning/thinking level (e.g., off, low, medium, high, or
|
||||
numeric tokens for Anthropic or Google Gemini)
|
||||
--debug= Set debug level (0: off, 1: basic, 2: detailed, 3: trace)
|
||||
Help Options:
|
||||
-h, --help Show this help message
|
||||
```
|
||||
|
||||
### Debug Levels
|
||||
|
||||
Use the `--debug` flag to control runtime logging:
|
||||
|
||||
- `0`: off (default)
|
||||
- `1`: basic debug info
|
||||
- `2`: detailed debugging
|
||||
- `3`: trace level
|
||||
|
||||
## Our approach to prompting
|
||||
|
||||
Fabric _Patterns_ are different than most prompts you'll see.
|
||||
@@ -592,7 +672,7 @@ Fabric _Patterns_ are different than most prompts you'll see.
|
||||
Here's an example of a Fabric Pattern.
|
||||
|
||||
```bash
|
||||
https://github.com/danielmiessler/fabric/blob/main/patterns/extract_wisdom/system.md
|
||||
https://github.com/danielmiessler/Fabric/blob/main/data/patterns/extract_wisdom/system.md
|
||||
```
|
||||
|
||||
<img width="1461" alt="pattern-example" src="https://github.com/danielmiessler/fabric/assets/50654/b910c551-9263-405f-9735-71ca69bbab6d">
|
||||
|
||||
@@ -1,3 +1,3 @@
|
||||
package main
|
||||
|
||||
var version = "v1.4.286"
|
||||
var version = "v1.4.295"
|
||||
|
||||
Binary file not shown.
@@ -59,6 +59,13 @@ _fabric_gemini_voices() {
|
||||
compadd -X "Gemini TTS Voices:" ${voices}
|
||||
}
|
||||
|
||||
_fabric_transcription_models() {
|
||||
local -a models
|
||||
local cmd=${words[1]}
|
||||
models=(${(f)"$($cmd --list-transcription-models --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Transcription Models:" ${models}
|
||||
}
|
||||
|
||||
_fabric() {
|
||||
local curcontext="$curcontext" state line
|
||||
typeset -A opt_args
|
||||
@@ -107,6 +114,7 @@ _fabric() {
|
||||
'(--printsession)--printsession[Print session]:session:_fabric_sessions' \
|
||||
'(--readability)--readability[Convert HTML input into a clean, readable view]' \
|
||||
'(--input-has-vars)--input-has-vars[Apply variables to user input]' \
|
||||
'(--no-variable-replacement)--no-variable-replacement[Disable pattern variable replacement]' \
|
||||
'(--dry-run)--dry-run[Show what would be sent to the model without actually sending it]' \
|
||||
'(--serve)--serve[Serve the Fabric Rest API]' \
|
||||
'(--serveOllama)--serveOllama[Serve the Fabric Rest API with ollama endpoints]' \
|
||||
@@ -134,6 +142,10 @@ _fabric() {
|
||||
'(--think-start-tag)--think-start-tag[Start tag for thinking sections (default: <think>)]:start tag:' \
|
||||
'(--think-end-tag)--think-end-tag[End tag for thinking sections (default: </think>)]:end tag:' \
|
||||
'(--disable-responses-api)--disable-responses-api[Disable OpenAI Responses API (default: false)]' \
|
||||
'(--transcribe-file)--transcribe-file[Audio or video file to transcribe]:audio file:_files -g "*.mp3 *.mp4 *.mpeg *.mpga *.m4a *.wav *.webm"' \
|
||||
'(--transcribe-model)--transcribe-model[Model to use for transcription (separate from chat model)]:transcribe model:_fabric_transcription_models' \
|
||||
'(--split-media-file)--split-media-file[Split audio/video files larger than 25MB using ffmpeg]' \
|
||||
'(--debug)--debug[Set debug level (0=off, 1=basic, 2=detailed, 3=trace)]:debug level:(0 1 2 3)' \
|
||||
'(--notification)--notification[Send desktop notification when command completes]' \
|
||||
'(--notification-command)--notification-command[Custom command to run for notifications]:notification command:' \
|
||||
'(-h --help)'{-h,--help}'[Show this help message]' \
|
||||
|
||||
@@ -13,7 +13,7 @@ _fabric() {
|
||||
_get_comp_words_by_ref -n : cur prev words cword
|
||||
|
||||
# Define all possible options/flags
|
||||
local opts="--pattern -p --variable -v --context -C --session --attachment -a --setup -S --temperature -t --topp -T --stream -s --presencepenalty -P --raw -r --frequencypenalty -F --listpatterns -l --listmodels -L --listcontexts -x --listsessions -X --updatepatterns -U --copy -c --model -m --vendor -V --modelContextLength --output -o --output-session --latest -n --changeDefaultModel -d --youtube -y --playlist --transcript --transcript-with-timestamps --comments --metadata --yt-dlp-args --language -g --scrape_url -u --scrape_question -q --seed -e --thinking --wipecontext -w --wipesession -W --printcontext --printsession --readability --input-has-vars --dry-run --serve --serveOllama --address --api-key --config --search --search-location --image-file --image-size --image-quality --image-compression --image-background --suppress-think --think-start-tag --think-end-tag --disable-responses-api --voice --list-gemini-voices --notification --notification-command --version --listextensions --addextension --rmextension --strategy --liststrategies --listvendors --shell-complete-list --help -h"
|
||||
local opts="--pattern -p --variable -v --context -C --session --attachment -a --setup -S --temperature -t --topp -T --stream -s --presencepenalty -P --raw -r --frequencypenalty -F --listpatterns -l --listmodels -L --listcontexts -x --listsessions -X --updatepatterns -U --copy -c --model -m --vendor -V --modelContextLength --output -o --output-session --latest -n --changeDefaultModel -d --youtube -y --playlist --transcript --transcript-with-timestamps --comments --metadata --yt-dlp-args --language -g --scrape_url -u --scrape_question -q --seed -e --thinking --wipecontext -w --wipesession -W --printcontext --printsession --readability --input-has-vars --no-variable-replacement --dry-run --serve --serveOllama --address --api-key --config --search --search-location --image-file --image-size --image-quality --image-compression --image-background --suppress-think --think-start-tag --think-end-tag --disable-responses-api --transcribe-file --transcribe-model --split-media-file --voice --list-gemini-voices --notification --notification-command --debug --version --listextensions --addextension --rmextension --strategy --liststrategies --listvendors --shell-complete-list --help -h"
|
||||
|
||||
# Helper function for dynamic completions
|
||||
_fabric_get_list() {
|
||||
@@ -74,8 +74,16 @@ _fabric() {
|
||||
COMPREPLY=($(compgen -W "$(_fabric_get_list --list-gemini-voices)" -- "${cur}"))
|
||||
return 0
|
||||
;;
|
||||
--transcribe-model)
|
||||
COMPREPLY=($(compgen -W "$(_fabric_get_list --list-transcription-models)" -- "${cur}"))
|
||||
return 0
|
||||
;;
|
||||
--debug)
|
||||
COMPREPLY=($(compgen -W "0 1 2 3" -- "${cur}"))
|
||||
return 0
|
||||
;;
|
||||
# Options requiring file/directory paths
|
||||
-a | --attachment | -o | --output | --config | --addextension | --image-file)
|
||||
-a | --attachment | -o | --output | --config | --addextension | --image-file | --transcribe-file)
|
||||
_filedir
|
||||
return 0
|
||||
;;
|
||||
|
||||
@@ -47,6 +47,11 @@ function __fabric_get_gemini_voices
|
||||
$cmd --list-gemini-voices --shell-complete-list 2>/dev/null
|
||||
end
|
||||
|
||||
function __fabric_get_transcription_models
|
||||
set cmd (commandline -opc)[1]
|
||||
$cmd --list-transcription-models --shell-complete-list 2>/dev/null
|
||||
end
|
||||
|
||||
# Main completion function
|
||||
function __fabric_register_completions
|
||||
set cmd $argv[1]
|
||||
@@ -92,6 +97,9 @@ function __fabric_register_completions
|
||||
complete -c $cmd -l think-start-tag -d "Start tag for thinking sections (default: <think>)"
|
||||
complete -c $cmd -l think-end-tag -d "End tag for thinking sections (default: </think>)"
|
||||
complete -c $cmd -l voice -d "TTS voice name for supported models (e.g., Kore, Charon, Puck)" -a "(__fabric_get_gemini_voices)"
|
||||
complete -c $cmd -l transcribe-file -d "Audio or video file to transcribe" -r -a "*.mp3 *.mp4 *.mpeg *.mpga *.m4a *.wav *.webm"
|
||||
complete -c $cmd -l transcribe-model -d "Model to use for transcription (separate from chat model)" -a "(__fabric_get_transcription_models)"
|
||||
complete -c $cmd -l debug -d "Set debug level (0=off, 1=basic, 2=detailed, 3=trace)" -a "0 1 2 3"
|
||||
complete -c $cmd -l notification-command -d "Custom command to run for notifications (overrides built-in notifications)"
|
||||
|
||||
# Boolean flags (no arguments)
|
||||
@@ -113,8 +121,9 @@ function __fabric_register_completions
|
||||
complete -c $cmd -l metadata -d "Output video metadata"
|
||||
complete -c $cmd -l yt-dlp-args -d "Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')"
|
||||
complete -c $cmd -l readability -d "Convert HTML input into a clean, readable view"
|
||||
complete -c $cmd -l input-has-vars -d "Apply variables to user input"
|
||||
complete -c $cmd -l dry-run -d "Show what would be sent to the model without actually sending it"
|
||||
complete -c $cmd -l input-has-vars -d "Apply variables to user input"
|
||||
complete -c $cmd -l no-variable-replacement -d "Disable pattern variable replacement"
|
||||
complete -c $cmd -l dry-run -d "Show what would be sent to the model without actually sending it"
|
||||
complete -c $cmd -l search -d "Enable web search tool for supported models (Anthropic, OpenAI, Gemini)"
|
||||
complete -c $cmd -l serve -d "Serve the Fabric Rest API"
|
||||
complete -c $cmd -l serveOllama -d "Serve the Fabric Rest API with ollama endpoints"
|
||||
@@ -126,6 +135,7 @@ function __fabric_register_completions
|
||||
complete -c $cmd -l shell-complete-list -d "Output raw list without headers/formatting (for shell completion)"
|
||||
complete -c $cmd -l suppress-think -d "Suppress text enclosed in thinking tags"
|
||||
complete -c $cmd -l disable-responses-api -d "Disable OpenAI Responses API (default: false)"
|
||||
complete -c $cmd -l split-media-file -d "Split audio/video files larger than 25MB using ffmpeg"
|
||||
complete -c $cmd -l notification -d "Send desktop notification when command completes"
|
||||
complete -c $cmd -s h -l help -d "Show this help message"
|
||||
end
|
||||
|
||||
139
docs/Using-Speech-To-Text.md
Normal file
139
docs/Using-Speech-To-Text.md
Normal file
@@ -0,0 +1,139 @@
|
||||
# Using Speech-To-Text (STT) with Fabric
|
||||
|
||||
Fabric supports speech-to-text transcription of audio and video files using OpenAI's transcription models. This feature allows you to convert spoken content into text that can then be processed through Fabric's patterns.
|
||||
|
||||
## Overview
|
||||
|
||||
The STT feature integrates OpenAI's Whisper and GPT-4o transcription models to convert audio/video files into text. The transcribed text is automatically passed as input to your chosen pattern or chat session.
|
||||
|
||||
## Requirements
|
||||
|
||||
- OpenAI API key configured in Fabric
|
||||
- For files larger than 25MB: `ffmpeg` installed on your system
|
||||
- Supported audio/video formats: `.mp3`, `.mp4`, `.mpeg`, `.mpga`, `.m4a`, `.wav`, `.webm`
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Simple Transcription
|
||||
|
||||
To transcribe an audio file and send the result to a pattern:
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file /path/to/audio.mp3 --transcribe-model whisper-1 --pattern summarize
|
||||
```
|
||||
|
||||
### Transcription Only
|
||||
|
||||
To just transcribe a file without applying a pattern:
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file /path/to/audio.mp3 --transcribe-model whisper-1
|
||||
```
|
||||
|
||||
## Command Line Flags
|
||||
|
||||
### Required Flags
|
||||
|
||||
- `--transcribe-file`: Path to the audio or video file to transcribe
|
||||
- `--transcribe-model`: Model to use for transcription (required when using transcription)
|
||||
|
||||
### Optional Flags
|
||||
|
||||
- `--split-media-file`: Automatically split files larger than 25MB into chunks using ffmpeg
|
||||
|
||||
## Available Models
|
||||
|
||||
You can list all available transcription models with:
|
||||
|
||||
```bash
|
||||
fabric --list-transcription-models
|
||||
```
|
||||
|
||||
Currently supported models:
|
||||
|
||||
- `whisper-1`: OpenAI's Whisper model
|
||||
- `gpt-4o-mini-transcribe`: GPT-4o Mini transcription model
|
||||
- `gpt-4o-transcribe`: GPT-4o transcription model
|
||||
|
||||
## File Size Handling
|
||||
|
||||
### Files Under 25MB
|
||||
|
||||
Files under the 25MB limit are processed directly without any special handling.
|
||||
|
||||
### Files Over 25MB
|
||||
|
||||
For files exceeding OpenAI's 25MB limit, you have two options:
|
||||
|
||||
1. **Manual handling**: The command will fail with an error message suggesting to use `--split-media-file`
|
||||
2. **Automatic splitting**: Use the `--split-media-file` flag to automatically split the file into chunks
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file large_recording.mp4 --transcribe-model whisper-1 --split-media-file --pattern summarize
|
||||
```
|
||||
|
||||
When splitting is enabled:
|
||||
|
||||
- Fabric uses `ffmpeg` to split the file into 10-minute segments initially
|
||||
- If segments are still too large, it reduces the segment time by half repeatedly
|
||||
- All segments are transcribed and the results are concatenated
|
||||
- Temporary files are automatically cleaned up after processing
|
||||
|
||||
## Integration with Patterns
|
||||
|
||||
The transcribed text is seamlessly integrated into Fabric's workflow:
|
||||
|
||||
1. File is transcribed using the specified model
|
||||
2. Transcribed text becomes the input message
|
||||
3. Text is sent to the specified pattern or chat session
|
||||
|
||||
### Example Workflows
|
||||
|
||||
**Meeting transcription and summarization:**
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file meeting.mp4 --transcribe-model gpt-4o-transcribe --pattern summarize
|
||||
```
|
||||
|
||||
**Interview analysis:**
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file interview.mp3 --transcribe-model whisper-1 --pattern extract_insights
|
||||
```
|
||||
|
||||
**Large video file processing:**
|
||||
|
||||
```bash
|
||||
fabric --transcribe-file presentation.mp4 --transcribe-model gpt-4o-transcribe --split-media-file --pattern create_summary
|
||||
```
|
||||
|
||||
## Error Handling
|
||||
|
||||
Common error scenarios:
|
||||
|
||||
- **Unsupported format**: Only the listed audio/video formats are supported
|
||||
- **File too large**: Use `--split-media-file` for files over 25MB
|
||||
- **Missing ffmpeg**: Install ffmpeg for automatic file splitting
|
||||
- **Invalid model**: Use `--list-transcription-models` to see available models
|
||||
- **Missing model**: The `--transcribe-model` flag is required when using `--transcribe-file`
|
||||
|
||||
## Technical Details
|
||||
|
||||
### Implementation
|
||||
|
||||
- Transcription is handled in `internal/cli/transcribe.go:14`
|
||||
- OpenAI-specific implementation in `internal/plugins/ai/openai/openai_audio.go:41`
|
||||
- File splitting uses ffmpeg with configurable segment duration
|
||||
- Supports any vendor that implements the `transcriber` interface
|
||||
|
||||
### Processing Pipeline
|
||||
|
||||
1. CLI validates file format and size
|
||||
2. If file > 25MB and splitting enabled, file is split using ffmpeg
|
||||
3. Each file/segment is sent to OpenAI's transcription API
|
||||
4. Results are concatenated with spaces between segments
|
||||
5. Transcribed text is passed as input to the main Fabric pipeline
|
||||
|
||||
### Vendor Support
|
||||
|
||||
Currently, only OpenAI is supported for transcription, but the interface allows for future expansion to other vendors that provide transcription capabilities.
|
||||
@@ -18,6 +18,19 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
if messageTools != "" {
|
||||
currentFlags.AppendMessage(messageTools)
|
||||
}
|
||||
// Check for pattern-specific model via environment variable
|
||||
if currentFlags.Pattern != "" && currentFlags.Model == "" {
|
||||
envVar := "FABRIC_MODEL_" + strings.ToUpper(strings.ReplaceAll(currentFlags.Pattern, "-", "_"))
|
||||
if modelSpec := os.Getenv(envVar); modelSpec != "" {
|
||||
parts := strings.SplitN(modelSpec, "|", 2)
|
||||
if len(parts) == 2 {
|
||||
currentFlags.Vendor = parts[0]
|
||||
currentFlags.Model = parts[1]
|
||||
} else {
|
||||
currentFlags.Model = modelSpec
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
var chatter *core.Chatter
|
||||
if chatter, err = registry.GetChatter(currentFlags.Model, currentFlags.ModelContextLength,
|
||||
|
||||
@@ -74,6 +74,15 @@ func Cli(version string) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// Handle transcription if specified
|
||||
if currentFlags.TranscribeFile != "" {
|
||||
var transcriptionMessage string
|
||||
if transcriptionMessage, err = handleTranscription(currentFlags, registry); err != nil {
|
||||
return
|
||||
}
|
||||
currentFlags.Message = AppendMessage(currentFlags.Message, transcriptionMessage)
|
||||
}
|
||||
|
||||
// Process HTML readability if needed
|
||||
if currentFlags.HtmlReadability {
|
||||
if msg, cleanErr := converter.HtmlReadability(currentFlags.Message); cleanErr != nil {
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
"github.com/jessevdk/go-flags"
|
||||
"golang.org/x/text/language"
|
||||
@@ -66,6 +67,7 @@ type Flags struct {
|
||||
PrintSession string `long:"printsession" description:"Print session"`
|
||||
HtmlReadability bool `long:"readability" description:"Convert HTML input into a clean, readable view"`
|
||||
InputHasVars bool `long:"input-has-vars" description:"Apply variables to user input"`
|
||||
NoVariableReplacement bool `long:"no-variable-replacement" description:"Disable pattern variable replacement"`
|
||||
DryRun bool `long:"dry-run" description:"Show what would be sent to the model without actually sending it"`
|
||||
Serve bool `long:"serve" description:"Serve the Fabric Rest API"`
|
||||
ServeOllama bool `long:"serveOllama" description:"Serve the Fabric Rest API with ollama endpoints"`
|
||||
@@ -91,23 +93,21 @@ type Flags struct {
|
||||
ThinkStartTag string `long:"think-start-tag" yaml:"thinkStartTag" description:"Start tag for thinking sections" default:"<think>"`
|
||||
ThinkEndTag string `long:"think-end-tag" yaml:"thinkEndTag" description:"End tag for thinking sections" default:"</think>"`
|
||||
DisableResponsesAPI bool `long:"disable-responses-api" yaml:"disableResponsesAPI" description:"Disable OpenAI Responses API (default: false)"`
|
||||
TranscribeFile string `long:"transcribe-file" yaml:"transcribeFile" description:"Audio or video file to transcribe"`
|
||||
TranscribeModel string `long:"transcribe-model" yaml:"transcribeModel" description:"Model to use for transcription (separate from chat model)"`
|
||||
SplitMediaFile bool `long:"split-media-file" yaml:"splitMediaFile" description:"Split audio/video files larger than 25MB using ffmpeg"`
|
||||
Voice string `long:"voice" yaml:"voice" description:"TTS voice name for supported models (e.g., Kore, Charon, Puck)" default:"Kore"`
|
||||
ListGeminiVoices bool `long:"list-gemini-voices" description:"List all available Gemini TTS voices"`
|
||||
ListTranscriptionModels bool `long:"list-transcription-models" description:"List all available transcription models"`
|
||||
Notification bool `long:"notification" yaml:"notification" description:"Send desktop notification when command completes"`
|
||||
NotificationCommand string `long:"notification-command" yaml:"notificationCommand" description:"Custom command to run for notifications (overrides built-in notifications)"`
|
||||
Thinking domain.ThinkingLevel `long:"thinking" yaml:"thinking" description:"Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic)"`
|
||||
}
|
||||
|
||||
var debug = false
|
||||
|
||||
func Debugf(format string, a ...interface{}) {
|
||||
if debug {
|
||||
fmt.Printf("DEBUG: "+format, a...)
|
||||
}
|
||||
Thinking domain.ThinkingLevel `long:"thinking" yaml:"thinking" description:"Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)"`
|
||||
Debug int `long:"debug" description:"Set debug level (0=off, 1=basic, 2=detailed, 3=trace)" default:"0"`
|
||||
}
|
||||
|
||||
// Init Initialize flags. returns a Flags struct and an error
|
||||
func Init() (ret *Flags, err error) {
|
||||
debuglog.SetLevel(debuglog.LevelFromInt(parseDebugLevel(os.Args[1:])))
|
||||
// Track which yaml-configured flags were set on CLI
|
||||
usedFlags := make(map[string]bool)
|
||||
yamlArgsScan := os.Args[1:]
|
||||
@@ -123,11 +123,11 @@ func Init() (ret *Flags, err error) {
|
||||
shortTag := field.Tag.Get("short")
|
||||
if longTag != "" {
|
||||
flagToYamlTag[longTag] = yamlTag
|
||||
Debugf("Mapped long flag %s to yaml tag %s\n", longTag, yamlTag)
|
||||
debuglog.Debug(debuglog.Detailed, "Mapped long flag %s to yaml tag %s\n", longTag, yamlTag)
|
||||
}
|
||||
if shortTag != "" {
|
||||
flagToYamlTag[shortTag] = yamlTag
|
||||
Debugf("Mapped short flag %s to yaml tag %s\n", shortTag, yamlTag)
|
||||
debuglog.Debug(debuglog.Detailed, "Mapped short flag %s to yaml tag %s\n", shortTag, yamlTag)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -139,7 +139,7 @@ func Init() (ret *Flags, err error) {
|
||||
if flag != "" {
|
||||
if yamlTag, exists := flagToYamlTag[flag]; exists {
|
||||
usedFlags[yamlTag] = true
|
||||
Debugf("CLI flag used: %s (yaml: %s)\n", flag, yamlTag)
|
||||
debuglog.Debug(debuglog.Detailed, "CLI flag used: %s (yaml: %s)\n", flag, yamlTag)
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -151,6 +151,7 @@ func Init() (ret *Flags, err error) {
|
||||
if args, err = parser.Parse(); err != nil {
|
||||
return
|
||||
}
|
||||
debuglog.SetLevel(debuglog.LevelFromInt(ret.Debug))
|
||||
|
||||
// Check to see if a ~/.config/fabric/config.yaml config file exists (only when user didn't specify a config)
|
||||
if ret.Config == "" {
|
||||
@@ -158,7 +159,7 @@ func Init() (ret *Flags, err error) {
|
||||
if defaultConfigPath, err := util.GetDefaultConfigPath(); err == nil && defaultConfigPath != "" {
|
||||
ret.Config = defaultConfigPath
|
||||
} else if err != nil {
|
||||
Debugf("Could not determine default config path: %v\n", err)
|
||||
debuglog.Debug(debuglog.Detailed, "Could not determine default config path: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -183,13 +184,13 @@ func Init() (ret *Flags, err error) {
|
||||
if flagField.CanSet() {
|
||||
if yamlField.Type() != flagField.Type() {
|
||||
if err := assignWithConversion(flagField, yamlField); err != nil {
|
||||
Debugf("Type conversion failed for %s: %v\n", yamlTag, err)
|
||||
debuglog.Debug(debuglog.Detailed, "Type conversion failed for %s: %v\n", yamlTag, err)
|
||||
continue
|
||||
}
|
||||
} else {
|
||||
flagField.Set(yamlField)
|
||||
}
|
||||
Debugf("Applied YAML value for %s: %v\n", yamlTag, yamlField.Interface())
|
||||
debuglog.Debug(debuglog.Detailed, "Applied YAML value for %s: %v\n", yamlTag, yamlField.Interface())
|
||||
}
|
||||
}
|
||||
}
|
||||
@@ -215,6 +216,22 @@ func Init() (ret *Flags, err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// parseDebugLevel scans raw CLI arguments for a debug level before the flag
// parser runs, accepting either "--debug N" or "--debug=N". It returns the
// first value that parses as an integer, or 0 when none is present.
func parseDebugLevel(args []string) int {
	for idx, token := range args {
		switch {
		case token == "--debug" && idx+1 < len(args):
			if parsed, convErr := strconv.Atoi(args[idx+1]); convErr == nil {
				return parsed
			}
		case strings.HasPrefix(token, "--debug="):
			value := strings.TrimPrefix(token, "--debug=")
			if parsed, convErr := strconv.Atoi(value); convErr == nil {
				return parsed
			}
		}
	}
	return 0
}
|
||||
|
||||
func extractFlag(arg string) string {
|
||||
var flag string
|
||||
if strings.HasPrefix(arg, "--") {
|
||||
@@ -284,7 +301,7 @@ func loadYAMLConfig(configPath string) (*Flags, error) {
|
||||
return nil, fmt.Errorf("error parsing config file: %w", err)
|
||||
}
|
||||
|
||||
Debugf("Config: %v\n", config)
|
||||
debuglog.Debug(debuglog.Detailed, "Config: %v\n", config)
|
||||
|
||||
return config, nil
|
||||
}
|
||||
@@ -460,13 +477,14 @@ func (o *Flags) BuildChatOptions() (ret *domain.ChatOptions, err error) {
|
||||
|
||||
func (o *Flags) BuildChatRequest(Meta string) (ret *domain.ChatRequest, err error) {
|
||||
ret = &domain.ChatRequest{
|
||||
ContextName: o.Context,
|
||||
SessionName: o.Session,
|
||||
PatternName: o.Pattern,
|
||||
StrategyName: o.Strategy,
|
||||
PatternVariables: o.PatternVariables,
|
||||
InputHasVars: o.InputHasVars,
|
||||
Meta: Meta,
|
||||
ContextName: o.Context,
|
||||
SessionName: o.Session,
|
||||
PatternName: o.Pattern,
|
||||
StrategyName: o.Strategy,
|
||||
PatternVariables: o.PatternVariables,
|
||||
InputHasVars: o.InputHasVars,
|
||||
NoVariableReplacement: o.NoVariableReplacement,
|
||||
Meta: Meta,
|
||||
}
|
||||
|
||||
var message *chat.ChatCompletionMessage
|
||||
|
||||
@@ -5,6 +5,8 @@ import (
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
openai "github.com/openai/openai-go"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/gemini"
|
||||
@@ -39,7 +41,7 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
|
||||
if currentFlags.ShellCompleteOutput {
|
||||
models.Print(true)
|
||||
} else {
|
||||
models.PrintWithVendor(false)
|
||||
models.PrintWithVendor(false, registry.Defaults.Vendor.Value, registry.Defaults.Model.Value)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
@@ -70,5 +72,30 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
|
||||
return true, nil
|
||||
}
|
||||
|
||||
if currentFlags.ListTranscriptionModels {
|
||||
listTranscriptionModels(currentFlags.ShellCompleteOutput)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
// listTranscriptionModels lists all available transcription models.
// With shellComplete set, it emits one bare model name per line for
// completion scripts; otherwise it prints a human-readable list.
func listTranscriptionModels(shellComplete bool) {
	available := []string{
		string(openai.AudioModelWhisper1),
		string(openai.AudioModelGPT4oMiniTranscribe),
		string(openai.AudioModelGPT4oTranscribe),
	}

	if !shellComplete {
		fmt.Println("Available transcription models:")
	}
	for _, name := range available {
		if shellComplete {
			fmt.Println(name)
		} else {
			fmt.Printf("  %s\n", name)
		}
	}
}
|
||||
|
||||
35
internal/cli/transcribe.go
Normal file
35
internal/cli/transcribe.go
Normal file
@@ -0,0 +1,35 @@
|
||||
package cli

import (
	"context"
	"fmt"

	"github.com/danielmiessler/fabric/internal/core"
)

// transcriber is implemented by AI vendors capable of audio transcription.
type transcriber interface {
	TranscribeFile(ctx context.Context, filePath, model string, split bool) (string, error)
}

// handleTranscription resolves the configured vendor (defaulting to OpenAI),
// verifies it supports transcription, and returns the transcript of the file
// named by --transcribe-file using the model named by --transcribe-model.
func handleTranscription(flags *Flags, registry *core.PluginRegistry) (message string, err error) {
	name := flags.Vendor
	if name == "" {
		name = "OpenAI"
	}

	vendor, found := registry.VendorManager.VendorsByName[name]
	if !found {
		return "", fmt.Errorf("vendor %s not configured", name)
	}

	impl, supported := vendor.(transcriber)
	if !supported {
		return "", fmt.Errorf("vendor %s does not support audio transcription", name)
	}

	if flags.TranscribeModel == "" {
		return "", fmt.Errorf("transcription model is required (use --transcribe-model)")
	}

	return impl.TranscribeFile(context.Background(), flags.TranscribeFile, flags.TranscribeModel, flags.SplitMediaFile)
}
|
||||
@@ -180,7 +180,7 @@ func (o *Chatter) BuildSession(request *domain.ChatRequest, raw bool) (session *
|
||||
}
|
||||
|
||||
// Now we know request.Message is not nil, process template variables
|
||||
if request.InputHasVars {
|
||||
if request.InputHasVars && !request.NoVariableReplacement {
|
||||
request.Message.Content, err = template.ApplyTemplate(request.Message.Content, request.PatternVariables, "")
|
||||
if err != nil {
|
||||
return nil, err
|
||||
@@ -190,7 +190,12 @@ func (o *Chatter) BuildSession(request *domain.ChatRequest, raw bool) (session *
|
||||
var patternContent string
|
||||
inputUsed := false
|
||||
if request.PatternName != "" {
|
||||
pattern, err := o.db.Patterns.GetApplyVariables(request.PatternName, request.PatternVariables, request.Message.Content)
|
||||
var pattern *fsdb.Pattern
|
||||
if request.NoVariableReplacement {
|
||||
pattern, err = o.db.Patterns.GetWithoutVariables(request.PatternName, request.Message.Content)
|
||||
} else {
|
||||
pattern, err = o.db.Patterns.GetApplyVariables(request.PatternName, request.PatternVariables, request.Message.Content)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("could not get pattern %s: %v", request.PatternName, err)
|
||||
|
||||
@@ -81,8 +81,10 @@ func TestGetChatter_WarnsOnAmbiguousModel(t *testing.T) {
|
||||
if err != nil {
|
||||
t.Fatalf("GetChatter() error = %v", err)
|
||||
}
|
||||
if chatter.vendor.GetName() != "VendorA" {
|
||||
t.Fatalf("expected vendor VendorA, got %s", chatter.vendor.GetName())
|
||||
// Verify that one of the valid vendors was selected (don't care which one due to map iteration randomness)
|
||||
vendorName := chatter.vendor.GetName()
|
||||
if vendorName != "VendorA" && vendorName != "VendorB" {
|
||||
t.Fatalf("expected vendor VendorA or VendorB, got %s", vendorName)
|
||||
}
|
||||
if !strings.Contains(string(warning), "multiple vendors provide model shared-model") {
|
||||
t.Fatalf("expected warning about multiple vendors, got %q", string(warning))
|
||||
|
||||
@@ -13,15 +13,16 @@ const (
|
||||
)
|
||||
|
||||
type ChatRequest struct {
|
||||
ContextName string
|
||||
SessionName string
|
||||
PatternName string
|
||||
PatternVariables map[string]string
|
||||
Message *chat.ChatCompletionMessage
|
||||
Language string
|
||||
Meta string
|
||||
InputHasVars bool
|
||||
StrategyName string
|
||||
ContextName string
|
||||
SessionName string
|
||||
PatternName string
|
||||
PatternVariables map[string]string
|
||||
Message *chat.ChatCompletionMessage
|
||||
Language string
|
||||
Meta string
|
||||
InputHasVars bool
|
||||
NoVariableReplacement bool
|
||||
StrategyName string
|
||||
}
|
||||
|
||||
type ChatOptions struct {
|
||||
|
||||
69
internal/log/log.go
Normal file
69
internal/log/log.go
Normal file
@@ -0,0 +1,69 @@
|
||||
// Package log provides a minimal, leveled debug logger shared across fabric.
package log

import (
	"fmt"
	"io"
	"os"
	"sync"
)

// Level represents the debug verbosity.
type Level int

const (
	// Off disables all debug output.
	Off Level = iota
	// Basic provides minimal debugging information.
	Basic
	// Detailed provides more verbose debugging.
	Detailed
	// Trace is the most verbose level.
	Trace
)

var (
	// mu guards level and output so the logger is safe for concurrent use.
	mu     sync.RWMutex
	level  Level     = Off
	output io.Writer = os.Stderr
)

// SetLevel sets the global debug level.
func SetLevel(l Level) {
	mu.Lock()
	level = l
	mu.Unlock()
}

// LevelFromInt converts an int to a Level, clamping out-of-range values:
// anything <= 0 maps to Off and anything >= 3 maps to Trace.
func LevelFromInt(i int) Level {
	switch {
	case i <= 0:
		return Off
	case i == 1:
		return Basic
	case i == 2:
		return Detailed
	default:
		// i >= 3: the cases above cover every smaller value, so the
		// previously unreachable "default: return Off" branch is gone.
		return Trace
	}
}

// Debug writes a debug message if the global level permits.
// The format string is passed straight to fmt.Fprintf with a "DEBUG: " prefix.
func Debug(l Level, format string, a ...interface{}) {
	mu.RLock()
	current := level
	w := output
	mu.RUnlock()
	if current >= l {
		fmt.Fprintf(w, "DEBUG: "+format, a...)
	}
}

// SetOutput allows overriding the output destination for debug logs.
func SetOutput(w io.Writer) {
	mu.Lock()
	output = w
	mu.Unlock()
}
|
||||
@@ -4,7 +4,6 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
@@ -12,6 +11,7 @@ import (
|
||||
"github.com/anthropics/anthropic-sdk-go/option"
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
)
|
||||
@@ -195,7 +195,7 @@ func (an *Client) SendStream(
|
||||
}
|
||||
stream := an.client.Messages.NewStreaming(ctx, params, reqOpts...)
|
||||
if stream.Err() != nil && len(betas) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), stream.Err())
|
||||
debuglog.Debug(debuglog.Basic, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), stream.Err())
|
||||
stream = an.client.Messages.NewStreaming(ctx, params)
|
||||
}
|
||||
|
||||
@@ -289,7 +289,7 @@ func (an *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage,
|
||||
}
|
||||
if message, err = an.client.Messages.New(ctx, params, reqOpts...); err != nil {
|
||||
if len(betas) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), err)
|
||||
debuglog.Debug(debuglog.Basic, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), err)
|
||||
if message, err = an.client.Messages.New(ctx, params); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
@@ -170,6 +171,25 @@ func (o *Client) NeedsRawMode(modelName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
// parseThinkingConfig maps a ThinkingLevel to a Gemini ThinkingConfig.
// Named levels (low/medium/high) use the shared domain token budgets, and a
// bare positive integer is treated as an explicit token budget. The second
// return value reports whether thinking should be enabled at all.
func parseThinkingConfig(level domain.ThinkingLevel) (*genai.ThinkingConfig, bool) {
	normalized := domain.ThinkingLevel(strings.ToLower(strings.TrimSpace(string(level))))

	withBudget := func(tokens int32) (*genai.ThinkingConfig, bool) {
		return &genai.ThinkingConfig{IncludeThoughts: true, ThinkingBudget: &tokens}, true
	}

	switch normalized {
	case "", domain.ThinkingOff:
		return nil, false
	case domain.ThinkingLow, domain.ThinkingMedium, domain.ThinkingHigh:
		if budget, ok := domain.ThinkingBudgets[normalized]; ok {
			return withBudget(int32(budget))
		}
	default:
		// Not a named level: accept a raw positive token count.
		if tokens, err := strconv.ParseInt(string(normalized), 10, 32); err == nil && tokens > 0 {
			return withBudget(int32(tokens))
		}
	}
	return nil, false
}
|
||||
|
||||
// buildGenerateContentConfig constructs the generation config with optional tools.
|
||||
// When search is enabled it injects the Google Search tool. The optional search
|
||||
// location accepts either:
|
||||
@@ -201,6 +221,10 @@ func (o *Client) buildGenerateContentConfig(opts *domain.ChatOptions) (*genai.Ge
|
||||
}
|
||||
}
|
||||
|
||||
if tc, ok := parseThinkingConfig(opts.Thinking); ok {
|
||||
cfg.ThinkingConfig = tc
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
|
||||
|
||||
|
||||
@@ -129,6 +129,38 @@ func TestBuildGenerateContentConfig_LanguageCodeNormalization(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
// TestBuildGenerateContentConfig_Thinking verifies that a named thinking
// level produces a config using the shared domain token budget.
func TestBuildGenerateContentConfig_Thinking(t *testing.T) {
	client := &Client{}
	opts := &domain.ChatOptions{Thinking: domain.ThinkingLow}

	cfg, err := client.buildGenerateContentConfig(opts)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tc := cfg.ThinkingConfig
	if tc == nil || !tc.IncludeThoughts {
		t.Fatalf("expected thinking config with thoughts included")
	}
	if tc.ThinkingBudget == nil || *tc.ThinkingBudget != int32(domain.TokenBudgetLow) {
		t.Errorf("expected thinking budget %d, got %+v", domain.TokenBudgetLow, tc.ThinkingBudget)
	}
}
|
||||
|
||||
// TestBuildGenerateContentConfig_ThinkingTokens verifies that a numeric
// thinking level is honored as an explicit token budget.
func TestBuildGenerateContentConfig_ThinkingTokens(t *testing.T) {
	client := &Client{}
	opts := &domain.ChatOptions{Thinking: domain.ThinkingLevel("123")}

	cfg, err := client.buildGenerateContentConfig(opts)
	if err != nil {
		t.Fatalf("unexpected error: %v", err)
	}
	tc := cfg.ThinkingConfig
	if tc == nil || tc.ThinkingBudget == nil {
		t.Fatalf("expected thinking config with budget")
	}
	if got := *tc.ThinkingBudget; got != 123 {
		t.Errorf("expected thinking budget 123, got %d", got)
	}
}
|
||||
|
||||
func TestCitationFormatting(t *testing.T) {
|
||||
client := &Client{}
|
||||
response := &genai.GenerateContentResponse{
|
||||
|
||||
@@ -18,7 +18,8 @@ type VendorsModels struct {
|
||||
|
||||
// PrintWithVendor prints models including their vendor on each line.
|
||||
// When shellCompleteList is true, output is suitable for shell completion.
|
||||
func (o *VendorsModels) PrintWithVendor(shellCompleteList bool) {
|
||||
// Default vendor and model are highlighted with an asterisk.
|
||||
func (o *VendorsModels) PrintWithVendor(shellCompleteList bool, defaultVendor, defaultModel string) {
|
||||
if !shellCompleteList {
|
||||
fmt.Printf("\n%v:\n", o.SelectionLabel)
|
||||
}
|
||||
@@ -42,7 +43,11 @@ func (o *VendorsModels) PrintWithVendor(shellCompleteList bool) {
|
||||
if shellCompleteList {
|
||||
fmt.Printf("%s|%s\n", groupItems.Group, item)
|
||||
} else {
|
||||
fmt.Printf("\t[%d]\t%s|%s\n", currentItemIndex, groupItems.Group, item)
|
||||
mark := " "
|
||||
if strings.EqualFold(groupItems.Group, defaultVendor) && strings.EqualFold(item, defaultModel) {
|
||||
mark = " *"
|
||||
}
|
||||
fmt.Printf("%s\t[%d]\t%s|%s\n", mark, currentItemIndex, groupItems.Group, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,6 +1,9 @@
|
||||
package ai
|
||||
|
||||
import (
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
)
|
||||
|
||||
@@ -31,3 +34,23 @@ func TestFindVendorsByModel(t *testing.T) {
|
||||
t.Fatalf("FindVendorsByModel() = %v, want %v", foundVendors, []string{"vendor1"})
|
||||
}
|
||||
}
|
||||
|
||||
// TestPrintWithVendorMarksDefault confirms the default vendor/model pair is
// flagged with an asterisk in the human-readable model listing.
func TestPrintWithVendorMarksDefault(t *testing.T) {
	vendors := NewVendorsModels()
	vendors.AddGroupItems("vendor1", "model1")
	vendors.AddGroupItems("vendor2", "model2")

	// Capture stdout through a pipe while printing.
	r, w, _ := os.Pipe()
	savedStdout := os.Stdout
	os.Stdout = w

	vendors.PrintWithVendor(false, "vendor2", "model2")

	w.Close()
	os.Stdout = savedStdout
	captured, _ := io.ReadAll(r)

	if !strings.Contains(string(captured), " *\t[2]\tvendor2|model2") {
		t.Fatalf("default model not marked: %s", captured)
	}
}
|
||||
|
||||
153
internal/plugins/ai/openai/openai_audio.go
Normal file
153
internal/plugins/ai/openai/openai_audio.go
Normal file
@@ -0,0 +1,153 @@
|
||||
package openai
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"slices"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
|
||||
openai "github.com/openai/openai-go"
|
||||
)
|
||||
|
||||
// MaxAudioFileSize defines the maximum allowed size for audio uploads (25MB).
// NOTE(review): presumably mirrors the OpenAI per-request upload limit for
// the transcription endpoint — confirm against current API documentation.
const MaxAudioFileSize int64 = 25 * 1024 * 1024

// AllowedTranscriptionModels lists the models supported for transcription.
var AllowedTranscriptionModels = []string{
	string(openai.AudioModelWhisper1),
	string(openai.AudioModelGPT4oMiniTranscribe),
	string(openai.AudioModelGPT4oTranscribe),
}

// allowedAudioExtensions defines the supported input file extensions.
// Lookups are case-insensitive: TranscribeFile lowercases the extension
// before checking membership.
var allowedAudioExtensions = map[string]struct{}{
	".mp3":  {},
	".mp4":  {},
	".mpeg": {},
	".mpga": {},
	".m4a":  {},
	".wav":  {},
	".webm": {},
}
|
||||
|
||||
// TranscribeFile transcribes the given audio file using the specified model.
// If the file exceeds the size limit, it can optionally be split into chunks
// using ffmpeg; chunk transcripts are joined with single spaces.
func (o *Client) TranscribeFile(ctx context.Context, filePath, model string, split bool) (string, error) {
	if ctx == nil {
		ctx = context.Background()
	}

	if !slices.Contains(AllowedTranscriptionModels, model) {
		return "", fmt.Errorf("model '%s' is not supported for transcription", model)
	}

	ext := strings.ToLower(filepath.Ext(filePath))
	if _, supported := allowedAudioExtensions[ext]; !supported {
		return "", fmt.Errorf("unsupported audio format '%s'", ext)
	}

	stat, statErr := os.Stat(filePath)
	if statErr != nil {
		return "", statErr
	}

	// Small files go straight through; oversized ones are chunked first.
	segments := []string{filePath}
	if stat.Size() > MaxAudioFileSize {
		if !split {
			return "", fmt.Errorf("file %s exceeds 25MB limit; use --split-media-file to enable automatic splitting", filePath)
		}
		debuglog.Debug(debuglog.Basic, "File %s is larger than the size limit... breaking it up into chunks...\n", filePath)
		chunks, cleanup, splitErr := splitAudioFile(filePath, ext, MaxAudioFileSize)
		if splitErr != nil {
			return "", splitErr
		}
		defer cleanup()
		segments = chunks
	}

	var out strings.Builder
	for idx, segment := range segments {
		debuglog.Debug(debuglog.Basic, "Using model %s to transcribe part %d (file name: %s)...\n", model, idx+1, segment)
		f, openErr := os.Open(segment)
		if openErr != nil {
			return "", openErr
		}
		resp, txErr := o.ApiClient.Audio.Transcriptions.New(ctx, openai.AudioTranscriptionNewParams{
			File:  f,
			Model: openai.AudioModel(model),
		})
		f.Close()
		if txErr != nil {
			return "", txErr
		}
		if idx > 0 {
			out.WriteString(" ")
		}
		out.WriteString(resp.Text)
	}

	return out.String(), nil
}
|
||||
|
||||
// splitAudioFile splits the source file into chunks smaller than maxSize using ffmpeg.
// It returns the list of chunk file paths and a cleanup function.
//
// Because "-c copy" avoids re-encoding, ffmpeg can only cut at existing
// packet/keyframe boundaries, so a segment's byte size is not strictly
// proportional to its duration. The loop therefore retries with a halved
// segment duration whenever any produced chunk is still over maxSize.
func splitAudioFile(src, ext string, maxSize int64) (files []string, cleanup func(), err error) {
	if _, err = exec.LookPath("ffmpeg"); err != nil {
		return nil, nil, fmt.Errorf("ffmpeg not found: please install it")
	}

	// All chunks live in one temp dir; cleanup removes it wholesale.
	var dir string
	if dir, err = os.MkdirTemp("", "fabric-audio-*"); err != nil {
		return nil, nil, err
	}
	cleanup = func() { os.RemoveAll(dir) }

	segmentTime := 600 // start with 10 minutes
	for {
		pattern := filepath.Join(dir, "chunk-%03d"+ext)
		debuglog.Debug(debuglog.Basic, "Running ffmpeg to split audio into %d-second chunks...\n", segmentTime)
		cmd := exec.Command("ffmpeg", "-y", "-i", src, "-f", "segment", "-segment_time", fmt.Sprintf("%d", segmentTime), "-c", "copy", pattern)
		var stderr bytes.Buffer
		cmd.Stderr = &stderr
		if err = cmd.Run(); err != nil {
			return nil, cleanup, fmt.Errorf("ffmpeg failed: %v: %s", err, stderr.String())
		}

		// Glob + sort restores the chunk-000, chunk-001, ... order.
		if files, err = filepath.Glob(filepath.Join(dir, "chunk-*"+ext)); err != nil {
			return nil, cleanup, err
		}
		sort.Strings(files)

		tooBig := false
		for _, f := range files {
			var info os.FileInfo
			if info, err = os.Stat(f); err != nil {
				return nil, cleanup, err
			}
			if info.Size() > maxSize {
				tooBig = true
				break
			}
		}
		if !tooBig {
			return files, cleanup, nil
		}
		// Remove the oversized attempt before retrying with shorter segments,
		// so the next glob doesn't pick up stale chunks.
		for _, f := range files {
			_ = os.Remove(f)
		}
		if segmentTime <= 1 {
			// Can't shorten further: even 1-second segments exceed maxSize.
			return nil, cleanup, fmt.Errorf("unable to split file into acceptable size chunks")
		}
		segmentTime /= 2
	}
}
|
||||
@@ -102,6 +102,11 @@ var ProviderMap = map[string]ProviderConfig{
|
||||
BaseURL: "https://api.together.xyz/v1",
|
||||
ImplementsResponses: false,
|
||||
},
|
||||
"Venice AI": {
|
||||
Name: "Venice AI",
|
||||
BaseURL: "https://api.venice.ai/api/v1",
|
||||
ImplementsResponses: false,
|
||||
},
|
||||
}
|
||||
|
||||
// GetProviderByName returns the provider configuration for a given name with O(1) lookup
|
||||
|
||||
@@ -148,7 +148,6 @@ func (o *VendorsManager) setupVendorTo(vendor Vendor, configuredVendors map[stri
|
||||
delete(configuredVendors, vendor.GetName())
|
||||
fmt.Printf("[%v] skipped\n", vendor.GetName())
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
type modelResult struct {
|
||||
|
||||
@@ -31,6 +31,27 @@ type Pattern struct {
|
||||
func (o *PatternsEntity) GetApplyVariables(
|
||||
source string, variables map[string]string, input string) (pattern *Pattern, err error) {
|
||||
|
||||
if pattern, err = o.loadPattern(source); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
err = o.applyVariables(pattern, variables, input)
|
||||
return
|
||||
}
|
||||
|
||||
// GetWithoutVariables returns a pattern with only the {{input}} placeholder processed
|
||||
// and skips template variable replacement
|
||||
func (o *PatternsEntity) GetWithoutVariables(source, input string) (pattern *Pattern, err error) {
|
||||
|
||||
if pattern, err = o.loadPattern(source); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
o.applyInput(pattern, input)
|
||||
return
|
||||
}
|
||||
|
||||
func (o *PatternsEntity) loadPattern(source string) (pattern *Pattern, err error) {
|
||||
// Determine if this is a file path
|
||||
isFilePath := strings.HasPrefix(source, "\\") ||
|
||||
strings.HasPrefix(source, "/") ||
|
||||
@@ -39,8 +60,8 @@ func (o *PatternsEntity) GetApplyVariables(
|
||||
|
||||
if isFilePath {
|
||||
// Resolve the file path using GetAbsolutePath
|
||||
absPath, err := util.GetAbsolutePath(source)
|
||||
if err != nil {
|
||||
var absPath string
|
||||
if absPath, err = util.GetAbsolutePath(source); err != nil {
|
||||
return nil, fmt.Errorf("could not resolve file path: %v", err)
|
||||
}
|
||||
|
||||
@@ -51,26 +72,27 @@ func (o *PatternsEntity) GetApplyVariables(
|
||||
pattern, err = o.getFromDB(source)
|
||||
}
|
||||
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Apply variables to the pattern
|
||||
err = o.applyVariables(pattern, variables, input)
|
||||
return
|
||||
}
|
||||
|
||||
func (o *PatternsEntity) applyVariables(
|
||||
pattern *Pattern, variables map[string]string, input string) (err error) {
|
||||
|
||||
// ensureInput guarantees the pattern text contains an {{input}} placeholder,
// appending one on its own line when the author omitted it.
func (o *PatternsEntity) ensureInput(pattern *Pattern) {
	if strings.Contains(pattern.Pattern, "{{input}}") {
		return
	}
	if !strings.HasSuffix(pattern.Pattern, "\n") {
		pattern.Pattern += "\n"
	}
	pattern.Pattern += "{{input}}"
}
|
||||
|
||||
// applyInput substitutes the user's input for every {{input}} placeholder,
// adding the placeholder first if the pattern lacks one.
func (o *PatternsEntity) applyInput(pattern *Pattern, input string) {
	o.ensureInput(pattern)
	resolved := strings.ReplaceAll(pattern.Pattern, "{{input}}", input)
	pattern.Pattern = resolved
}
|
||||
|
||||
func (o *PatternsEntity) applyVariables(
|
||||
pattern *Pattern, variables map[string]string, input string) (err error) {
|
||||
|
||||
o.ensureInput(pattern)
|
||||
|
||||
// Temporarily replace {{input}} with a sentinel token to protect it
|
||||
// from recursive variable resolution
|
||||
|
||||
@@ -145,6 +145,22 @@ func TestGetApplyVariables(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetWithoutVariables(t *testing.T) {
|
||||
entity, cleanup := setupTestPatternsEntity(t)
|
||||
defer cleanup()
|
||||
|
||||
createTestPattern(t, entity, "test-pattern", "Prefix {{input}} {{roam}}")
|
||||
|
||||
result, err := entity.GetWithoutVariables("test-pattern", "hello")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "Prefix hello {{roam}}", result.Pattern)
|
||||
|
||||
createTestPattern(t, entity, "no-input", "Static content")
|
||||
result, err = entity.GetWithoutVariables("no-input", "hi")
|
||||
require.NoError(t, err)
|
||||
assert.Equal(t, "Static content\nhi", result.Pattern)
|
||||
}
|
||||
|
||||
func TestPatternsEntity_Save(t *testing.T) {
|
||||
entity, cleanup := setupTestPatternsEntity(t)
|
||||
defer cleanup()
|
||||
|
||||
@@ -10,8 +10,9 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
|
||||
"gopkg.in/yaml.v3"
|
||||
// Add this import
|
||||
)
|
||||
|
||||
// ExtensionDefinition represents a single extension configuration
|
||||
@@ -87,9 +88,7 @@ func NewExtensionRegistry(configDir string) *ExtensionRegistry {
|
||||
r.ensureConfigDir()
|
||||
|
||||
if err := r.loadRegistry(); err != nil {
|
||||
if Debug {
|
||||
fmt.Printf("Warning: could not load extension registry: %v\n", err)
|
||||
}
|
||||
debuglog.Debug(debuglog.Basic, "Warning: could not load extension registry: %v\n", err)
|
||||
}
|
||||
|
||||
return r
|
||||
|
||||
@@ -6,6 +6,8 @@ import (
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"strings"
|
||||
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
)
|
||||
|
||||
var (
|
||||
@@ -14,7 +16,6 @@ var (
|
||||
filePlugin = &FilePlugin{}
|
||||
fetchPlugin = &FetchPlugin{}
|
||||
sysPlugin = &SysPlugin{}
|
||||
Debug = false // Debug flag
|
||||
)
|
||||
|
||||
var extensionManager *ExtensionManager
|
||||
@@ -33,9 +34,7 @@ var pluginPattern = regexp.MustCompile(`\{\{plugin:([^:]+):([^:]+)(?::([^}]+))?\
|
||||
var extensionPattern = regexp.MustCompile(`\{\{ext:([^:]+):([^:]+)(?::([^}]+))?\}\}`)
|
||||
|
||||
func debugf(format string, a ...interface{}) {
|
||||
if Debug {
|
||||
fmt.Printf(format, a...)
|
||||
}
|
||||
debuglog.Debug(debuglog.Trace, format, a...)
|
||||
}
|
||||
|
||||
func ApplyTemplate(content string, variables map[string]string, input string) (string, error) {
|
||||
|
||||
@@ -181,7 +181,8 @@ func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, additi
|
||||
if len(langMatch) > 2 {
|
||||
langMatch = langMatch[:2]
|
||||
}
|
||||
args = append(args, "--sub-langs", langMatch)
|
||||
langOpts := language + "," + langMatch + ".*," + langMatch
|
||||
args = append(args, "--sub-langs", langOpts)
|
||||
}
|
||||
|
||||
// Add user-provided arguments last so they take precedence
|
||||
|
||||
@@ -1 +1 @@
|
||||
"1.4.286"
|
||||
"1.4.295"
|
||||
|
||||
99
scripts/readme_updates/README.md
Normal file
99
scripts/readme_updates/README.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# README Update Scripts
|
||||
|
||||
This directory contains automation scripts for updating the main README.md file with release information from the changelog database.
|
||||
|
||||
## `update_readme_features.py`
|
||||
|
||||
A Python script that generates the "Recent Major Features" section for the README by extracting and filtering release information from the changelog SQLite database.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Generate the Recent Major Features section with default limit (20 releases)
|
||||
python scripts/readme_updates/update_readme_features.py
|
||||
|
||||
# Specify a custom limit
|
||||
python scripts/readme_updates/update_readme_features.py --limit 15
|
||||
|
||||
# Use a custom database path
|
||||
python scripts/readme_updates/update_readme_features.py --db /path/to/changelog.db
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Database Connection**: Connects to `cmd/generate_changelog/changelog.db` (or custom path)
|
||||
2. **Data Extraction**: Queries the `versions` table for release information
|
||||
3. **Feature Filtering**: Uses heuristics to identify feature/improvement releases
|
||||
4. **Markdown Generation**: Formats output to match README style
|
||||
|
||||
### Feature Detection Heuristics
|
||||
|
||||
The script uses keyword-based heuristics to filter releases:
|
||||
|
||||
#### Include Keywords (Features/Improvements)
|
||||
- new, feature, feat, add, introduce, enable, support
|
||||
- improve, enhance, performance, speed
|
||||
- option, flag, argument, parameter
|
||||
- integration, provider, search, tts, audio, model
|
||||
- cli, ui, web, oauth, sync, database
|
||||
- notifications, desktop, reasoning, thinking
|
||||
|
||||
#### Exclude Keywords (Non-Features)
|
||||
- fix, bug, hotfix
|
||||
- ci, cd, pipeline, chore
|
||||
- docs, readme, refactor, style, typo
|
||||
- test, bump, deps, dependency
|
||||
- merge, revert, format, lint, build
|
||||
- release, prepare, coverage, security
|
||||
|
||||
### Integration with README
|
||||
|
||||
To update the README with new release features:
|
||||
|
||||
```bash
|
||||
# Generate the features and save to a temporary file
|
||||
python scripts/readme_updates/update_readme_features.py --limit 20 > /tmp/recent_features.md
|
||||
|
||||
# Manually replace the "### Recent Major Features" section in README.md
|
||||
# with the generated content
|
||||
```
|
||||
|
||||
### Database Schema
|
||||
|
||||
The script expects the following SQLite table structure:
|
||||
|
||||
```sql
|
||||
CREATE TABLE versions (
|
||||
name TEXT PRIMARY KEY,
|
||||
date DATETIME,
|
||||
commit_sha TEXT,
|
||||
pr_numbers TEXT,
|
||||
ai_summary TEXT,
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
```
|
||||
|
||||
### Date Format Support
|
||||
|
||||
The script can parse various date formats:
|
||||
- ISO 8601 with timezone: `2025-08-14 14:11:04+00:00`
|
||||
- ISO 8601 basic: `2025-08-14T14:11:04`
|
||||
- Date only: `2025-08-14`
|
||||
- US format: `08/14/2025`
|
||||
|
||||
Output format is standardized to: `Aug 14, 2025`
|
||||
|
||||
### Maintenance Notes
|
||||
|
||||
- **AI Summary Format Changes**: If the format of AI summaries changes, update the `extract_title_desc()` and `split_summary()` functions
|
||||
- **Keyword Tuning**: Adjust `INCLUDE_RE` and `EXCLUDE_RE` patterns as needed
|
||||
- **Title Extraction**: The script attempts to extract concise titles from feature descriptions
|
||||
- **Description Length**: Descriptions are limited to 200 characters for readability
|
||||
|
||||
### Future Enhancements
|
||||
|
||||
Potential improvements for automated README updates:
|
||||
- Add section delimiter markers in README for automated replacement
|
||||
- Create a GitHub Action to run on new releases
|
||||
- Add support for categorizing features by type
|
||||
- Implement confidence scoring for feature detection
|
||||
281
scripts/readme_updates/update_readme_features.py
Executable file
281
scripts/readme_updates/update_readme_features.py
Executable file
@@ -0,0 +1,281 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate the '### Recent Major Features' markdown section for README from the changelog SQLite DB.
|
||||
|
||||
- Connects to cmd/generate_changelog/changelog.db
|
||||
- Extracts version, date, and AI summaries from the 'versions' table
|
||||
- Heuristically filters for feature/improvement items (excludes CI/CD/docs/bug fixes)
|
||||
- Formats output to match README style:
|
||||
- [vX.Y.Z](https://github.com/danielmiessler/fabric/releases/tag/vX.Y.Z) (Aug 14, 2025) — **Feature Name**: Short description
|
||||
|
||||
Usage:
|
||||
python scripts/readme_updates/update_readme_features.py --limit 20
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
import re
|
||||
import sys
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
# Heuristics for filtering feature-related lines.
# NOTE: every fragment must be a raw string. In a plain string literal "\b"
# is a literal backspace character (\x08), not a regex word boundary — the
# original non-raw fragments made both patterns require a backspace after the
# keyword, so they could never match and the feature filter silently passed
# nothing through.
EXCLUDE_RE = re.compile(
    r"(?i)\b(fix|bug|hotfix|ci|cd|pipeline|chore|docs|doc|readme|refactor|style|typo|"
    r"test|tests|bump|deps|dependency|merge|revert|format|lint|build|release\b|prepare|"
    r"codeowners|coverage|security)\b"
)
INCLUDE_RE = re.compile(
    r"(?i)\b(new|feature|feat|add|added|introduce|enable|support|improve|enhance|"
    r"performance|speed|option|flag|argument|parameter|integration|provider|search|tts|"
    r"audio|model|cli|ui|web|oauth|sync|database|notifications|desktop|reasoning|thinking)\b"
)
|
||||
|
||||
|
||||
def parse_args():
    """Build and evaluate the CLI argument parser.

    Returns the parsed Namespace with ``limit`` (int, default 20) and
    ``db`` (optional path override for the changelog database).
    """
    parser = argparse.ArgumentParser(
        description="Generate README 'Recent Major Features' markdown from changelog DB."
    )
    parser.add_argument(
        "--limit",
        type=int,
        default=20,
        help="Maximum number of releases to include.",
    )
    parser.add_argument(
        "--db",
        type=str,
        default=None,
        help="Optional path to changelog.db (defaults to repo cmd/generate_changelog/changelog.db)",
    )
    return parser.parse_args()
|
||||
|
||||
|
||||
def repo_root() -> Path:
    """Return the repository root directory.

    This file lives at scripts/readme_updates/<name>.py, so the root is
    three levels above the resolved script path.
    """
    return Path(__file__).resolve().parents[2]
|
||||
|
||||
|
||||
def db_path(args) -> Path:
    """Resolve the changelog database location.

    A ``--db`` override from the CLI wins; otherwise fall back to the
    conventional cmd/generate_changelog/changelog.db under the repo root.
    """
    override = args.db
    if override:
        return Path(override).expanduser().resolve()
    return repo_root() / "cmd" / "generate_changelog" / "changelog.db"
|
||||
|
||||
|
||||
def connect(dbfile: Path):
    """Open the changelog SQLite database.

    Exits the process with status 1 (after a stderr message) when the
    database file does not exist.
    """
    if dbfile.exists():
        return sqlite3.connect(str(dbfile))
    print(f"ERROR: changelog database not found: {dbfile}", file=sys.stderr)
    sys.exit(1)
|
||||
|
||||
|
||||
def normalize_version(name: str) -> str:
    """Return the version string with a guaranteed leading 'v'."""
    text = str(name).strip()
    if text.startswith("v"):
        return text
    return f"v{text}"
|
||||
|
||||
|
||||
def parse_date(value) -> str:
    """Render a raw database date value as e.g. 'Aug 14, 2025'.

    Handles ISO 8601 with timezone/microseconds, bare dates, and a few
    legacy formats; unparseable input is echoed back in parentheses and
    None becomes '(Unknown date)'.
    """
    if value is None:
        return "(Unknown date)"

    raw = str(value).strip()

    def render(dt: datetime) -> str:
        # strftime pads single-digit days with a zero; drop it for README style.
        return dt.strftime("%b %d, %Y").replace(" 0", " ")

    # ISO-ish timestamps: strip timezone offset and microseconds first.
    if "+" in raw or "T" in raw:
        trimmed = raw.split("+")[0].split(".")[0]
        try:
            return render(datetime.strptime(trimmed, "%Y-%m-%d %H:%M:%S"))
        except ValueError:
            pass

    # Fallback formats, tried in order.
    for fmt in (
        "%Y-%m-%d",
        "%Y-%m-%d %H:%M:%S",
        "%Y-%m-%dT%H:%M:%S",
        "%Y/%m/%d",
        "%m/%d/%Y",
    ):
        try:
            return render(datetime.strptime(raw, fmt))
        except ValueError:
            continue

    # Could not parse: surface the original value, parenthesized.
    return f"({raw})"
|
||||
|
||||
|
||||
def split_summary(text: str) -> List[str]:
    """Break an AI summary into cleaned, non-empty content lines.

    Strips markdown headers, leading bullet markers, and the
    'PR [#N](url) by [author](url):' prefix from each line.
    """
    if not text:
        return []

    cleaned: List[str] = []
    for raw in text.split("\n"):
        candidate = raw.strip()
        if not candidate:
            continue
        # Markdown header markers.
        candidate = re.sub(r"^#+\s+", "", candidate)
        # 'PR [#123](url) by [name](url):' attribution prefix.
        candidate = re.sub(
            r"^PR\s+\[#\d+\]\([^)]+\)\s+by\s+\[[^\]]+\]\([^)]+\):\s*", "", candidate
        )
        # Bullet markers.
        candidate = re.sub(r"^[-*•]\s+", "", candidate)
        if candidate:
            cleaned.append(candidate)

    return cleaned
|
||||
|
||||
|
||||
def is_feature_line(line: str) -> bool:
    """Return True when the line reads like a feature/improvement.

    Conventional-commit housekeeping prefixes and EXCLUDE_RE keywords veto
    the line outright; otherwise it must hit an INCLUDE_RE keyword.
    """
    lowered = line.lower()

    # Hard vetoes: conventional-commit prefixes for non-feature work.
    housekeeping = ("chore:", "fix:", "docs:", "test:", "ci:", "build:", "refactor:")
    if any(prefix in lowered for prefix in housekeeping):
        return False

    if EXCLUDE_RE.search(line):
        return False

    return INCLUDE_RE.search(line) is not None
|
||||
|
||||
|
||||
def extract_title_desc(line: str) -> Tuple[str, str]:
    """Extract a (title, description) pair from a cleaned feature line.

    Strategy: if the line has a "Title: description" shape, split on the
    first colon and condense the title; otherwise take the first sentence
    as the description and map keywords in it to a canned title.
    """
    # Strip markdown bold markers so they don't leak into title/description.
    line = re.sub(r"\*\*([^*]+)\*\*", r"\1", line)

    # Preferred shape: "Some Title: the description".
    if ":" in line:
        parts = line.split(":", 1)
        if len(parts) == 2:  # always true once ":" is present; kept defensively
            title = parts[0].strip()
            desc = parts[1].strip()

            # Drop leading verbs so "Add X support" reads as "X support".
            title = (
                title.replace("Introduce ", "")
                .replace("Enable ", "")
                .replace("Add ", "")
            )
            title = title.replace("Implement ", "").replace("Support ", "")

            # Condense long titles: keep up to three "significant" words
            # (capitalized words or hyphen/underscore compounds).
            if len(title) > 30:
                # Try to extract key words
                key_words = []
                for word in title.split():
                    if word[0].isupper() or "-" in word or "_" in word:
                        key_words.append(word)
                if key_words:
                    title = " ".join(key_words[:3])

            return (title, desc)

    # Fallback: use the first sentence as the description.
    sentences = re.split(r"[.!?]\s+", line)
    if sentences:  # re.split always yields at least one element
        desc = sentences[0].strip()
        # Map keywords in the description to a canonical feature title.
        # Order matters: earlier branches win when multiple keywords appear.
        if "thinking" in desc.lower():
            return ("AI Reasoning", desc)
        elif "token" in desc.lower() and "context" in desc.lower():
            return ("Extended Context", desc)
        elif "curl" in desc.lower() or "install" in desc.lower():
            return ("Easy Setup", desc)
        elif "vendor" in desc.lower() or "model" in desc.lower():
            return ("Model Management", desc)
        elif "notification" in desc.lower():
            return ("Desktop Notifications", desc)
        elif "tts" in desc.lower() or "speech" in desc.lower():
            return ("Text-to-Speech", desc)
        elif "oauth" in desc.lower() or "auth" in desc.lower():
            return ("OAuth Auto-Auth", desc)
        elif "search" in desc.lower() and "web" in desc.lower():
            return ("Web Search", desc)
        else:
            # Generic title from the first two words of the description.
            words = desc.split()[:2]
            title = " ".join(words)
            return (title, desc)

    # Unreachable in practice (the branch above always returns); defensive default.
    return ("Feature", line)
|
||||
|
||||
|
||||
def pick_feature(ai_summary: str) -> Optional[Tuple[str, str]]:
    """Return (title, description) for the first feature-like summary line.

    Returns None when no line of the summary passes the feature filter.
    Descriptions are capped at 200 characters for README readability.
    """
    for candidate in split_summary(ai_summary):
        if not is_feature_line(candidate):
            continue
        title, desc = extract_title_desc(candidate)
        return (title, desc[:200])
    return None
|
||||
|
||||
|
||||
def build_item(
    version: str, date_str: str, feature_title: str, feature_desc: str
) -> str:
    """Format one release as a markdown bullet linking to its GitHub tag."""
    tag_url = f"https://github.com/danielmiessler/fabric/releases/tag/{version}"
    pieces = [
        f"- [{version}]({tag_url})",
        f"({date_str})",
        f"— **{feature_title}**: {feature_desc}",
    ]
    return " ".join(pieces)
|
||||
|
||||
|
||||
def main():
    """Entry point: print the '### Recent Major Features' markdown section.

    Reads releases newest-first from the changelog DB, keeps the first
    --limit entries that look like features, and writes markdown to stdout.
    """
    args = parse_args()
    conn = connect(db_path(args))
    cur = conn.cursor()

    # Newest releases first so the README leads with the most recent work.
    cur.execute("SELECT name, date, ai_summary FROM versions ORDER BY date DESC")
    rows = cur.fetchall()
    conn.close()

    items = []
    for name, date, summary in rows:
        feat = pick_feature(summary or "")
        if feat is None:
            continue
        title, desc = feat
        items.append(build_item(normalize_version(name), parse_date(date), title, desc))
        if len(items) >= args.limit:
            break

    print("### Recent Major Features")
    print()
    for entry in items:
        print(entry)


if __name__ == "__main__":
    main()
|
||||
Reference in New Issue
Block a user