Mirror of https://github.com/danielmiessler/Fabric.git, synced 2026-01-09 22:38:10 -05:00
Compare commits
186 Commits
Commits in this comparison (abbreviated SHA1):

48d74290f3, 3d4e967b92, d8690c7cec, 7eed9c3c64, 97b75cb153, b485a4584f, f4dbafc638, eae56e0038,
72a5e49855, 17b7d96da1, 1b2d9ec0ed, 63fe320b16, aafca303ad, 41821efd27, 3a4082a1f3, b6fa44d003,
09d2d7efc5, 4c2ebf25fa, b1b748dc9c, cc3e4226d7, 0f994d8136, 298a9007ad, b36e5d3372, d1b8eb10ce,
6000e7469e, 88d3fe65f3, 558e7f877d, f33d27f836, 1694324261, 3a3f5c50a8, b1abfd71c2, f5b7279225,
b974e1bfd5, 8dda68b3b9, 33c24e0cb2, 8fb0c5b8a8, d82122b624, f5966af95a, 9470ee1655, 9a118cf637,
d69757908f, 30525ef1c0, 8414e72545, caca366511, 261eb30951, bdb36ee296, 1351f138fb, 8da51968dc,
30d23f15be, 0a718be622, 21f258caa4, 3584f83b30, 056791233a, dc435dcc6e, 6edbc9dd38, fd60d66c0d,
08ec89bbe1, 836557f41c, f7c5c6d344, 9d18ad523e, efcd7dcac2, 768e87879e, 3c51cad614, bc642904e0,
fa135036f4, 2d414ec394, 9e72df9c6c, 1a933e1c9a, d5431f9843, e2dabc406d, 31f7f22629, 29aaf430ca,
9ef3518a07, 0b40bad986, 34ff4d30f2, 2b195f204d, 1d9596bf3d, 72d099d40a, 7ab6fe3baa, 198964df82,
f0998d3686, 75875ba9f5, ea009ff64b, 3c317f088b, f91ee2ce3c, 98968d972f, 8ea264e96c, 5203cba5a7,
f5fba12360, d7cc3ff8f1, 4887cdc353, 6aa38d2abc, 737e37f00e, 42bb72ab65, 612ae4e3b5, 27f9134912,
c02718855d, 4f16222b31, 8c27b34d0f, 0b71b54698, 614b1322d5, eab335873e, 577dc9896d, 3a4bb4b9b2,
c766915764, 71c08648c6, 95e2e6a5ac, 5cdf297d85, 5d7137804a, 8b6b8fbd44, 3e75aa260f, 92aca524a4,
f70eff2e41, 489c481acc, 3a1eaf375f, 52246dda28, 3c200e2883, bda6505d5c, a241c98837, 12d7803044,
d37a1acc9b, 7254571501, c300262804, e8ba57be90, 15fad3da87, e2b0d3c368, 3de85eb50e, 58e635c873,
dde21d2337, e3fcbcb12b, 839296e3ba, 5b97b0e56a, 38ff2288da, 771a1ac2e6, f6fd6f535a, f548ca5f82,
616f51748e, db5aaf9da6, a922032756, a415409a48, 19d95b9014, 73c7a8c147, 4dc84bd64d, dd96014f9b,
3cf2557af3, fcda0338cb, ac19c81ef0, a83d57065f, 055ed32ab8, 8d62165444, 63bc7a7e79, f2b2501767,
be1e2485ee, 38c4211649, 71e6355c10, 64411cdc02, 9a2ff983a4, a522d4a411, 9bdd77c277, cc68dddfe8,
07ee7f8b21, 15a355f08a, c50486b611, edaca7a045, 28432a50f0, 8ab891fcff, cab6df88ea, 42afd92f31,
76d6b1721e, 7d562096d1, 91c1aca0dd, b8008a34fb, 482759ae72, b0d096d0ea, e56ecfb7ae, 951bd134eb,
7ff04658f3, 272f04dd32, 29cb3796bf, f51f9e75a9, 63475784c7, 1a7bb27370, 4badaa4c85, bf6be964fd,
cdbcb0a512, f81cf193a2
.github/workflows/release.yml (vendored, 100 changed lines)
@@ -27,8 +27,39 @@ jobs:
- name: Run tests
run: go test -v ./...

get_version:
name: Get version
runs-on: ubuntu-latest
outputs:
latest_tag: ${{ steps.get_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Get version from source
id: get_version
shell: bash
run: |
if [ ! -f "nix/pkgs/fabric/version.nix" ]; then
echo "Error: version.nix file not found"
exit 1
fi
version=$(cat nix/pkgs/fabric/version.nix | tr -d '"' | tr -cd '0-9.')
if [ -z "$version" ]; then
echo "Error: version is empty"
exit 1
fi
if ! echo "$version" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+' > /dev/null; then
echo "Error: Invalid version format: $version"
exit 1
fi
echo "latest_tag=v$version" >> $GITHUB_OUTPUT

build:
name: Build binaries for Windows, macOS, and Linux
needs: [test, get_version]
runs-on: ${{ matrix.os }}
permissions:
contents: write
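For reference, the shell step above can be mirrored in Go. This is a minimal sketch, assuming the same `nix/pkgs/fabric/version.nix` layout; the function names are illustrative and are not part of the repository:

```go
package main

import (
	"fmt"
	"os"
	"regexp"
	"strings"
)

// readVersionNix mirrors the workflow step: read nix/pkgs/fabric/version.nix
// and keep only digits and dots (the equivalent of `tr -cd '0-9.'`).
func readVersionNix(path string) (string, error) {
	data, err := os.ReadFile(path)
	if err != nil {
		return "", fmt.Errorf("version.nix file not found: %w", err)
	}
	var b strings.Builder
	for _, r := range string(data) {
		if (r >= '0' && r <= '9') || r == '.' {
			b.WriteRune(r)
		}
	}
	version := b.String()
	if version == "" {
		return "", fmt.Errorf("version is empty")
	}
	// Same shape check as the workflow: major.minor.patch
	if !regexp.MustCompile(`^[0-9]+\.[0-9]+\.[0-9]+`).MatchString(version) {
		return "", fmt.Errorf("invalid version format: %s", version)
	}
	return version, nil
}

func main() {
	v, err := readVersionNix("nix/pkgs/fabric/version.nix")
	if err != nil {
		fmt.Fprintln(os.Stderr, "Error:", err)
		os.Exit(1)
	}
	// The workflow writes "latest_tag=v<version>" to $GITHUB_OUTPUT.
	fmt.Printf("latest_tag=v%s\n", v)
}
```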
@@ -51,25 +82,14 @@ jobs:
with:
go-version-file: ./go.mod

- name: Determine OS Name
id: os-name
run: |
if [ "${{ matrix.os }}" == "ubuntu-latest" ]; then
echo "OS=linux" >> $GITHUB_ENV
elif [ "${{ matrix.os }}" == "macos-latest" ]; then
echo "OS=darwin" >> $GITHUB_ENV
else
echo "OS=windows" >> $GITHUB_ENV
fi
shell: bash

- name: Build binary on Linux and macOS
if: matrix.os != 'windows-latest'
env:
GOOS: ${{ env.OS }}
GOOS: ${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}
GOARCH: ${{ matrix.arch }}
run: |
go build -o fabric-${OS}-${{ matrix.arch }} ./cmd/fabric
OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
go build -o fabric-${OS_NAME}-${{ matrix.arch }} ./cmd/fabric

- name: Build binary on Windows
if: matrix.os == 'windows-latest'
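The inline expression `${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}` takes over from the removed "Determine OS Name" step. A rough Go illustration of the same runner-to-GOOS mapping (only a sketch; the workflow itself needs nothing beyond the expression):

```go
package main

import "fmt"

// goosForRunner maps a GitHub Actions runner label to the GOOS value used in
// the artifact names, mirroring the workflow expression plus the Windows case.
func goosForRunner(runner string) string {
	switch runner {
	case "ubuntu-latest":
		return "linux"
	case "macos-latest":
		return "darwin"
	default:
		return "windows"
	}
}

func main() {
	for _, r := range []string{"ubuntu-latest", "macos-latest", "windows-latest"} {
		fmt.Printf("fabric-%s-amd64\n", goosForRunner(r))
	}
}
```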
@@ -83,8 +103,8 @@ jobs:
if: matrix.os != 'windows-latest'
uses: actions/upload-artifact@v4
with:
name: fabric-${OS}-${{ matrix.arch }}
path: fabric-${OS}-${{ matrix.arch }}
name: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}
path: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}

- name: Upload build artifact
if: matrix.os == 'windows-latest'
@@ -93,29 +113,15 @@ jobs:
name: fabric-windows-${{ matrix.arch }}.exe
path: fabric-windows-${{ matrix.arch }}.exe

- name: Get latest tag
if: matrix.os != 'windows-latest'
id: get_latest_tag
run: |
latest_tag=$(git tag --sort=-creatordate | head -n 1)
echo "latest_tag=$latest_tag" >> $GITHUB_ENV

- name: Get latest tag
if: matrix.os == 'windows-latest'
id: get_latest_tag_windows
run: |
$latest_tag = git tag --sort=-creatordate | Select-Object -First 1
Add-Content -Path $env:GITHUB_ENV -Value "latest_tag=$latest_tag"

- name: Create release if it doesn't exist
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if ! gh release view ${{ env.latest_tag }} >/dev/null 2>&1; then
gh release create ${{ env.latest_tag }} --title "Release ${{ env.latest_tag }}" --notes "Automated release for ${{ env.latest_tag }}"
if ! gh release view ${{ needs.get_version.outputs.latest_tag }} >/dev/null 2>&1; then
gh release create ${{ needs.get_version.outputs.latest_tag }} --title "Release ${{ needs.get_version.outputs.latest_tag }}" --notes "Automated release for ${{ needs.get_version.outputs.latest_tag }}"
else
echo "Release ${{ env.latest_tag }} already exists."
echo "Release ${{ needs.get_version.outputs.latest_tag }} already exists."
fi

- name: Upload release artifact
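The "Create release if it doesn't exist" step is idempotent: it only creates the release when `gh release view` fails. A hedged Go sketch of the same check-then-create flow driving the `gh` CLI (the tag value and error handling are illustrative):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

// ensureRelease creates a GitHub release for tag only if it does not already
// exist, mirroring the workflow's `gh release view || gh release create` logic.
func ensureRelease(tag string) error {
	// `gh release view` exits non-zero when the release is missing.
	if err := exec.Command("gh", "release", "view", tag).Run(); err == nil {
		fmt.Printf("Release %s already exists.\n", tag)
		return nil
	}
	cmd := exec.Command("gh", "release", "create", tag,
		"--title", "Release "+tag,
		"--notes", "Automated release for "+tag)
	cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
	return cmd.Run()
}

func main() {
	if err := ensureRelease("v1.4.287"); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```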
@@ -123,11 +129,35 @@ jobs:
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release upload ${{ env.latest_tag }} fabric-windows-${{ matrix.arch }}.exe
gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-windows-${{ matrix.arch }}.exe

- name: Upload release artifact
if: matrix.os != 'windows-latest'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release upload ${{ env.latest_tag }} fabric-${OS}-${{ matrix.arch }}
OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-${OS_NAME}-${{ matrix.arch }}

update_release_notes:
needs: [build, get_version]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: ./go.mod

- name: Update release description
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
go run ./cmd/generate_changelog --sync-db
go run ./cmd/generate_changelog --release ${{ needs.get_version.outputs.latest_tag }}
@@ -9,6 +9,7 @@ on:
- "**/*.md"
- "data/strategies/**"
- "cmd/generate_changelog/*.db"
- "cmd/generate_changelog/incoming/*.txt"
- "scripts/pattern_descriptions/*.json"
- "web/static/data/pattern_descriptions.json"
@@ -83,14 +84,24 @@ jobs:
run: |
nix run .#gomod2nix -- --outdir nix/pkgs/fabric

- name: Generate Changelog Entry
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
go run ./cmd/generate_changelog --process-prs ${{ env.new_tag }}
go run ./cmd/generate_changelog --sync-db
- name: Commit changes
run: |
# These files are modified by the version bump process
git add cmd/fabric/version.go
git add nix/pkgs/fabric/version.nix
git add nix/pkgs/fabric/gomod2nix.toml
git add .

# The changelog tool is responsible for staging CHANGELOG.md, changelog.db,
# and removing the incoming/ directory.

if ! git diff --staged --quiet; then
git commit -m "Update version to ${{ env.new_tag }} and commit $commit_hash"
git commit -m "chore(release): Update version to ${{ env.new_tag }}"
else
echo "No changes to commit."
fi
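The commit step above only commits when something is actually staged, keyed off the exit status of `git diff --staged --quiet`. A minimal Go sketch of that guard (illustrative only; the workflow uses plain shell, and the commit message/tag shown here is a stand-in for `${{ env.new_tag }}`):

```go
package main

import (
	"fmt"
	"os"
	"os/exec"
)

func main() {
	// `git diff --staged --quiet` exits 0 when nothing is staged and
	// non-zero when there are staged changes (or on a git error).
	if err := exec.Command("git", "diff", "--staged", "--quiet").Run(); err == nil {
		fmt.Println("No changes to commit.")
		return
	}
	commit := exec.Command("git", "commit", "-m", "chore(release): Update version to v1.4.287")
	commit.Stdout, commit.Stderr = os.Stdout, os.Stderr
	if err := commit.Run(); err != nil {
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
}
```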
.gitignore (vendored, 3 changed lines)
@@ -350,3 +350,6 @@ web/static/*.png
# Local tmp directory
.tmp/
tmp/

# Ignore .claude/
.claude/
.vscode/settings.json (vendored, 34 changed lines)
@@ -1,33 +1,49 @@
|
||||
{
|
||||
"cSpell.words": [
|
||||
"Achird",
|
||||
"addextension",
|
||||
"adduser",
|
||||
"AIML",
|
||||
"Anki",
|
||||
"anthropics",
|
||||
"Aoede",
|
||||
"atotto",
|
||||
"Autonoe",
|
||||
"badfile",
|
||||
"Behrens",
|
||||
"blindspots",
|
||||
"Bombal",
|
||||
"Callirhoe",
|
||||
"Callirrhoe",
|
||||
"Cerebras",
|
||||
"compadd",
|
||||
"compdef",
|
||||
"compinit",
|
||||
"creatordate",
|
||||
"curcontext",
|
||||
"custompatterns",
|
||||
"danielmiessler",
|
||||
"davidanson",
|
||||
"Debugf",
|
||||
"dedup",
|
||||
"deepseek",
|
||||
"Despina",
|
||||
"direnv",
|
||||
"DMARC",
|
||||
"dryrun",
|
||||
"dsrp",
|
||||
"editability",
|
||||
"Eisler",
|
||||
"elif",
|
||||
"envrc",
|
||||
"Erinome",
|
||||
"Errorf",
|
||||
"eugeis",
|
||||
"Eugen",
|
||||
"excalidraw",
|
||||
"exolab",
|
||||
"fabriclogo",
|
||||
"flac",
|
||||
"fpath",
|
||||
"frequencypenalty",
|
||||
"fsdb",
|
||||
@@ -59,10 +75,14 @@
|
||||
"jessevdk",
|
||||
"Jina",
|
||||
"joho",
|
||||
"Keploy",
|
||||
"Kore",
|
||||
"ksylvan",
|
||||
"Langdock",
|
||||
"Laomedeia",
|
||||
"ldflags",
|
||||
"libexec",
|
||||
"libnotify",
|
||||
"listcontexts",
|
||||
"listextensions",
|
||||
"listmodels",
|
||||
@@ -76,12 +96,17 @@
|
||||
"matplotlib",
|
||||
"mattn",
|
||||
"mbed",
|
||||
"metacharacters",
|
||||
"Miessler",
|
||||
"nometa",
|
||||
"numpy",
|
||||
"ollama",
|
||||
"ollamaapi",
|
||||
"openaiapi",
|
||||
"opencode",
|
||||
"openrouter",
|
||||
"Orus",
|
||||
"osascript",
|
||||
"otiai",
|
||||
"pdflatex",
|
||||
"pipx",
|
||||
@@ -90,11 +115,14 @@
|
||||
"presencepenalty",
|
||||
"printcontext",
|
||||
"printsession",
|
||||
"Pulcherrima",
|
||||
"pycache",
|
||||
"pyperclip",
|
||||
"readystream",
|
||||
"restapi",
|
||||
"rmextension",
|
||||
"Sadachbia",
|
||||
"Sadaltager",
|
||||
"samber",
|
||||
"sashabaranov",
|
||||
"sdist",
|
||||
@@ -104,14 +132,18 @@
|
||||
"storer",
|
||||
"Streamlit",
|
||||
"stretchr",
|
||||
"subchunk",
|
||||
"Sulafat",
|
||||
"talkpanel",
|
||||
"Telos",
|
||||
"testpattern",
|
||||
"testuser",
|
||||
"Thacker",
|
||||
"tidwall",
|
||||
"topp",
|
||||
"ttrc",
|
||||
"unalias",
|
||||
"unconfigured",
|
||||
"unmarshalling",
|
||||
"updatepatterns",
|
||||
"videoid",
|
||||
@@ -119,6 +151,8 @@
|
||||
"WEBVTT",
|
||||
"wipecontext",
|
||||
"wipesession",
|
||||
"wireframes",
|
||||
"Worktree",
|
||||
"writeups",
|
||||
"xclip",
|
||||
"yourpatternname",
|
||||
|
||||
CHANGELOG.md (307 changed lines)
@@ -1,5 +1,312 @@
|
||||
# Changelog
|
||||
|
||||
## v1.4.287 (2025-08-14)
|
||||
|
||||
### PR [#1706](https://github.com/danielmiessler/Fabric/pull/1706) by [ksylvan](https://github.com/ksylvan): Gemini Thinking Support and README (New Features) automation
|
||||
|
||||
- Add comprehensive "Recent Major Features" section to README
|
||||
- Introduce new readme_updates Python script for automation
|
||||
- Enable Gemini thinking configuration with token budgets
|
||||
- Update CLI help text for Gemini thinking support
|
||||
- Add comprehensive test coverage for Gemini thinking
|
||||
|
||||
## v1.4.286 (2025-08-14)
|
||||
|
||||
### PR [#1700](https://github.com/danielmiessler/Fabric/pull/1700) by [ksylvan](https://github.com/ksylvan): Introduce Thinking Config Across Anthropic and OpenAI Providers
|
||||
|
||||
- Add --thinking CLI flag for configurable reasoning levels across providers
|
||||
- Implement Anthropic ThinkingConfig with standardized budgets and tokens
|
||||
- Map OpenAI reasoning effort from thinking levels
|
||||
- Show thinking level in dry-run formatted options
|
||||
- Overhaul suggest_pattern docs with categories, workflows, usage examples
|
||||
|
||||
## v1.4.285 (2025-08-13)
|
||||
|
||||
### PR [#1698](https://github.com/danielmiessler/Fabric/pull/1698) by [ksylvan](https://github.com/ksylvan): Enable One Million Token Context Beta Feature for Sonnet-4
|
||||
|
||||
- Chore: upgrade anthropic-sdk-go to v1.9.1 and add beta feature support for context-1m
|
||||
- Add modelBetas map for beta feature configuration
|
||||
- Implement context-1m-2025-08-07 beta for Claude Sonnet 4
|
||||
- Add beta header support with fallback handling
|
||||
- Preserve existing beta headers in OAuth transport
|
||||
|
||||
## v1.4.284 (2025-08-12)
|
||||
|
||||
### PR [#1695](https://github.com/danielmiessler/Fabric/pull/1695) by [ksylvan](https://github.com/ksylvan): Introduce One-Liner Curl Install for Completions
|
||||
|
||||
- Add one-liner curl install method for shell completions without requiring repository cloning
|
||||
- Support downloading completions when files are missing locally with dry-run option for previewing changes
|
||||
- Enable custom download source via environment variable and create temporary directory for downloaded completion files
|
||||
- Add automatic cleanup of temporary files and validate downloaded files are non-empty and not HTML
|
||||
- Improve error handling and standardize logging by routing informational messages to stderr to avoid stdout pollution
|
||||
|
||||
## v1.4.283 (2025-08-12)
|
||||
|
||||
### PR [#1692](https://github.com/danielmiessler/Fabric/pull/1692) by [ksylvan](https://github.com/ksylvan): Add Vendor Selection Support for Models
|
||||
|
||||
- Add -V/--vendor flag to specify model vendor
|
||||
- Implement vendor-aware model resolution and availability validation
|
||||
- Warn on ambiguous models; suggest --vendor to disambiguate
|
||||
- Update bash, zsh, fish completions with vendor suggestions
|
||||
- Extend --listmodels to print vendor|model when interactive
|
||||
|
||||
## v1.4.282 (2025-08-11)
|
||||
|
||||
### PR [#1689](https://github.com/danielmiessler/Fabric/pull/1689) by [ksylvan](https://github.com/ksylvan): Enhanced Shell Completions for Fabric CLI Binaries
|
||||
|
||||
- Add 'fabric-ai' alias support across all shell completions
|
||||
- Use invoked command name for dynamic completion list queries
|
||||
- Refactor fish completions into reusable registrar for multiple commands
|
||||
- Update Bash completion to reference executable via COMP_WORDS[0]
|
||||
- Install completions automatically with new cross-shell setup script
|
||||
|
||||
## v1.4.281 (2025-08-11)
|
||||
|
||||
### PR [#1687](https://github.com/danielmiessler/Fabric/pull/1687) by [ksylvan](https://github.com/ksylvan): Add Web Search Tool Support for Gemini Models
|
||||
|
||||
- Enable Gemini models to use web search tool with --search flag
|
||||
- Add validation for search-location timezone and language code formats
|
||||
- Normalize language codes from underscores to hyphenated form
|
||||
- Append deduplicated web citations under standardized Sources section
|
||||
- Improve robustness for nil candidates and content parts
|
||||
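PR #1687 validates the `--search-location` value and normalizes language codes from underscores to hyphens. A hedged sketch of what such validation can look like in Go (the helper names and the exact language-tag pattern are illustrative assumptions, not Fabric's actual API):

```go
package main

import (
	"fmt"
	"regexp"
	"strings"
	"time"
)

// normalizeLanguage converts codes like "en_US" to the hyphenated "en-US" form.
func normalizeLanguage(code string) string {
	return strings.ReplaceAll(code, "_", "-")
}

// validateSearchLocation accepts IANA timezone names such as
// "America/Los_Angeles" and simple language tags such as "en-US".
func validateSearchLocation(loc string) error {
	if _, err := time.LoadLocation(loc); err == nil {
		return nil // valid timezone
	}
	langRe := regexp.MustCompile(`^[a-z]{2,3}(-[A-Za-z0-9]{2,8})*$`)
	if langRe.MatchString(normalizeLanguage(loc)) {
		return nil // looks like a language code
	}
	return fmt.Errorf("invalid search location: %q", loc)
}

func main() {
	for _, loc := range []string{"America/Los_Angeles", "en_US", "not a location"} {
		fmt.Println(loc, "->", validateSearchLocation(loc))
	}
}
```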
|
||||
## v1.4.280 (2025-08-10)
|
||||
|
||||
### PR [#1686](https://github.com/danielmiessler/Fabric/pull/1686) by [ksylvan](https://github.com/ksylvan): Prevent duplicate text output in OpenAI streaming responses
|
||||
|
||||
- Fix: prevent duplicate text output in OpenAI streaming responses
|
||||
- Skip processing of ResponseOutputTextDone events
|
||||
- Prevent doubled text in stream output
|
||||
- Add clarifying comment about API behavior
|
||||
- Maintain delta chunk streaming functionality
|
||||
|
||||
## v1.4.279 (2025-08-10)
|
||||
|
||||
### PR [#1685](https://github.com/danielmiessler/Fabric/pull/1685) by [ksylvan](https://github.com/ksylvan): Fix Gemini Role Mapping for API Compatibility
|
||||
|
||||
- Fix Gemini role mapping to ensure proper API compatibility by converting chat roles to Gemini's user/model format
|
||||
- Map assistant role to model role per Gemini API constraints
|
||||
- Map system, developer, function, and tool roles to user role for proper handling
|
||||
- Default unrecognized roles to user role to preserve instruction context
|
||||
- Add comprehensive unit tests to validate convertMessages role mapping logic
|
||||
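The role-mapping rules listed above are simple enough to show directly. A sketch of the conversion described in PR #1685 (the function name is illustrative; Fabric's actual implementation lives in its Gemini plugin's convertMessages logic):

```go
package main

import "fmt"

// mapRoleToGemini converts a chat role to the two roles the Gemini API
// accepts, following the rules described for PR #1685: assistant becomes
// "model"; system, developer, function, and tool content is sent as "user";
// anything unrecognized also defaults to "user" so its instructions are kept.
func mapRoleToGemini(role string) string {
	switch role {
	case "assistant":
		return "model"
	case "user", "system", "developer", "function", "tool":
		return "user"
	default:
		return "user"
	}
}

func main() {
	for _, r := range []string{"assistant", "system", "tool", "narrator"} {
		fmt.Printf("%s -> %s\n", r, mapRoleToGemini(r))
	}
}
```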
|
||||
## v1.4.278 (2025-08-09)
|
||||
|
||||
### PR [#1681](https://github.com/danielmiessler/Fabric/pull/1681) by [ksylvan](https://github.com/ksylvan): Enhance YouTube Support with Custom yt-dlp Arguments
|
||||
|
||||
- Add `--yt-dlp-args` flag for custom YouTube downloader options with advanced control capabilities
|
||||
- Implement smart subtitle language fallback system when requested locale is unavailable
|
||||
- Add fallback logic for YouTube subtitle language detection with auto-detection of downloaded languages
|
||||
- Replace custom argument parser with shellquote and precompile regexes for improved performance and safety
|
||||
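PR #1681 parses the `--yt-dlp-args` string with a shell-quoting library instead of a hand-rolled parser. A small sketch using the widely used `github.com/kballard/go-shellquote` package (shown here as an assumption about which shellquote library is meant):

```go
package main

import (
	"fmt"
	"log"

	shellquote "github.com/kballard/go-shellquote"
)

func main() {
	// A user-supplied flag value such as:
	//   --yt-dlp-args '--cookies-from-browser brave --sub-langs "en.*"'
	raw := `--cookies-from-browser brave --sub-langs "en.*"`

	// Split respects quoting, so "en.*" stays a single argument.
	args, err := shellquote.Split(raw)
	if err != nil {
		log.Fatalf("invalid --yt-dlp-args value: %v", err)
	}
	fmt.Printf("%q\n", args) // ["--cookies-from-browser" "brave" "--sub-langs" "en.*"]
}
```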
|
||||
## v1.4.277 (2025-08-08)
|
||||
|
||||
### PR [#1679](https://github.com/danielmiessler/Fabric/pull/1679) by [ksylvan](https://github.com/ksylvan): Add cross-platform desktop notifications to Fabric CLI
|
||||
|
||||
- Add cross-platform desktop notifications with secure custom commands
|
||||
- Integrate notification sending into chat processing workflow
|
||||
- Add --notification and --notification-command CLI flags and help
|
||||
- Provide cross-platform providers: macOS, Linux, Windows with fallbacks
|
||||
- Escape shell metacharacters to prevent injection vulnerabilities
|
||||
|
||||
## v1.4.276 (2025-08-08)
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Ci: add write permissions to update_release_notes job
|
||||
|
||||
- Add contents write permission to release notes job
|
||||
|
||||
- Enable GitHub Actions to modify repository contents
|
||||
- Fix potential permission issues during release process
|
||||
|
||||
## v1.4.275 (2025-08-07)
|
||||
|
||||
### PR [#1676](https://github.com/danielmiessler/Fabric/pull/1676) by [ksylvan](https://github.com/ksylvan): Refactor authentication to support GITHUB_TOKEN and GH_TOKEN
|
||||
|
||||
- Refactor: centralize GitHub token retrieval logic into utility function
|
||||
- Support both GITHUB_TOKEN and GH_TOKEN environment variables with fallback handling
|
||||
- Add new util/token.go file for centralized token handling across the application
|
||||
- Update walker.go and main.go to use the new centralized token utility function
|
||||
- Feat: add 'gpt-5' to raw-mode models in OpenAI client to bypass structured chat message formatting
|
||||
|
||||
## v1.4.274 (2025-08-07)
|
||||
|
||||
### PR [#1673](https://github.com/danielmiessler/Fabric/pull/1673) by [ksylvan](https://github.com/ksylvan): Add Support for Claude Opus 4.1 Model
|
||||
|
||||
- Add Claude Opus 4.1 model support
|
||||
- Upgrade anthropic-sdk-go from v1.4.0 to v1.7.0
|
||||
- Fix temperature/topP parameter conflict for models
|
||||
- Refactor release workflow to use shared version job and simplify OS handling
|
||||
- Improve chat parameter defaults handling with domain constants
|
||||
|
||||
## v1.4.273 (2025-08-05)
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Remove redundant words from codebase
|
||||
- Fix typos in t_ patterns
|
||||
|
||||
## v1.4.272 (2025-07-28)
|
||||
|
||||
### PR [#1658](https://github.com/danielmiessler/Fabric/pull/1658) by [ksylvan](https://github.com/ksylvan): Update Release Process for Data Consistency
|
||||
|
||||
- Add database sync before generating changelog in release workflow
|
||||
- Ensure changelog generation includes latest database updates
|
||||
- Update changelog cache database
|
||||
|
||||
## v1.4.271 (2025-07-28)
|
||||
|
||||
### PR [#1657](https://github.com/danielmiessler/Fabric/pull/1657) by [ksylvan](https://github.com/ksylvan): Add GitHub Release Description Update Feature
|
||||
|
||||
- Add GitHub release description update via `--release` flag
|
||||
- Implement `ReleaseManager` for managing release descriptions
|
||||
- Create `release.go` for handling release updates
|
||||
- Update `release.yml` to run changelog generation
|
||||
- Enable AI summary updates for GitHub releases
|
||||
|
||||
## v1.4.270 (2025-07-27)
|
||||
|
||||
### PR [#1654](https://github.com/danielmiessler/Fabric/pull/1654) by [ksylvan](https://github.com/ksylvan): Refine Output File Handling for Safety
|
||||
|
||||
- Fix: prevent file overwrite and improve output messaging in CreateOutputFile
|
||||
- Add file existence check before creating output file
|
||||
- Return error if target file already exists
|
||||
- Change success message to write to stderr
|
||||
- Update message format with brackets for clarity
|
||||
|
||||
## v1.4.269 (2025-07-26)
|
||||
|
||||
### PR [#1653](https://github.com/danielmiessler/Fabric/pull/1653) by [ksylvan](https://github.com/ksylvan): docs: update Gemini TTS model references to gemini-2.5-flash-preview-tts
|
||||
|
||||
- Updated Gemini TTS model references from gemini-2.0-flash-tts to gemini-2.5-flash-preview-tts throughout documentation
|
||||
- Modified documentation examples to use the new gemini-2.5-flash-preview-tts model
|
||||
- Updated voice selection example commands in Gemini-TTS.md
|
||||
- Revised CLI help text example commands to reflect model changes
|
||||
- Updated changelog database binary file
|
||||
|
||||
## v1.4.268 (2025-07-26)
|
||||
|
||||
### PR [#1652](https://github.com/danielmiessler/Fabric/pull/1652) by [ksylvan](https://github.com/ksylvan): Implement Voice Selection for Gemini Text-to-Speech
|
||||
|
||||
- Feat: add Gemini TTS voice selection and listing functionality
|
||||
- Add `--voice` flag for TTS voice selection
|
||||
- Add `--list-gemini-voices` command for voice discovery
|
||||
- Implement voice validation for Gemini TTS models
|
||||
- Update shell completions for voice options
|
||||
|
||||
## v1.4.267 (2025-07-26)
|
||||
|
||||
### PR [#1650](https://github.com/danielmiessler/Fabric/pull/1650) by [ksylvan](https://github.com/ksylvan): Update Gemini Plugin to New SDK with TTS Support
|
||||
|
||||
- Update Gemini SDK to new genai library and add TTS audio output support
|
||||
- Replace deprecated generative-ai-go with google.golang.org/genai library
|
||||
- Add TTS model detection and audio output validation
|
||||
- Implement WAV file generation for TTS audio responses
|
||||
- Add audio format checking utilities in CLI output
|
||||
|
||||
## v1.4.266 (2025-07-25)
|
||||
|
||||
### PR [#1649](https://github.com/danielmiessler/Fabric/pull/1649) by [ksylvan](https://github.com/ksylvan): Fix Conditional API Initialization to Prevent Unnecessary Error Messages
|
||||
|
||||
- Prevent unconfigured API initialization and add Docker test suite
|
||||
- Add BEDROCK_AWS_REGION requirement for Bedrock initialization
|
||||
- Implement IsConfigured check for Ollama API URL
|
||||
- Create comprehensive Docker testing environment with 6 scenarios
|
||||
- Add interactive test runner with shell access
|
||||
|
||||
## v1.4.265 (2025-07-25)
|
||||
|
||||
### PR [#1647](https://github.com/danielmiessler/Fabric/pull/1647) by [ksylvan](https://github.com/ksylvan): Simplify Workflow with Single Version Retrieval Step
|
||||
|
||||
- Replace git tag lookup with version.nix file reading for release workflow
|
||||
- Remove OS-specific git tag retrieval steps and add unified version extraction from nix file
|
||||
- Include version format validation with regex check
|
||||
- Add error handling for missing version file
|
||||
- Consolidate cross-platform version logic into single step with bash shell for consistent version parsing
|
||||
|
||||
## v1.4.264 (2025-07-22)
|
||||
|
||||
### PR [#1642](https://github.com/danielmiessler/Fabric/pull/1642) by [ksylvan](https://github.com/ksylvan): Add --sync-db to `generate_changelog`, plus many fixes
|
||||
|
||||
- Add database synchronization command with comprehensive validation and sync-db flag for database integrity validation
|
||||
- Implement version and commit existence checking methods with enhanced time parsing using RFC3339Nano fallback support
|
||||
- Improve timestamp handling and merge commit detection in changelog generator with comprehensive merge commit detection using parents
|
||||
- Add email field support to PRCommit struct for author information and improve error logging throughout changelog generation
|
||||
- Optimize merge pattern matching with lazy initialization and thread-safe pattern compilation for better performance
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Chore: incoming 1642 changelog entry
|
||||
- Fix: improve error message formatting in version date parsing
|
||||
|
||||
- Add actual error details to date parsing failure message
|
||||
|
||||
- Include error variable in stderr output formatting
|
||||
- Enhance debugging information for invalid date formats
|
||||
- Docs: Update CHANGELOG after v1.4.263
|
||||
|
||||
## v1.4.263 (2025-07-21)
|
||||
|
||||
### PR [#1641](https://github.com/danielmiessler/Fabric/pull/1641) by [ksylvan](https://github.com/ksylvan): Fix Fabric Web timeout error
|
||||
|
||||
- Chore: extend proxy timeout in `vite.config.ts` to 15 minutes
|
||||
- Increase `/api` proxy timeout to 900,000 ms
|
||||
- Increase `/names` proxy timeout to 900,000 ms
|
||||
|
||||
## v1.4.262 (2025-07-21)
|
||||
|
||||
### PR [#1640](https://github.com/danielmiessler/Fabric/pull/1640) by [ksylvan](https://github.com/ksylvan): Implement Automated Changelog System for CI/CD Integration
|
||||
|
||||
- Add automated changelog processing for CI/CD integration with comprehensive test coverage and GitHub client validation methods
|
||||
- Implement release aggregation for incoming files with git operations for staging changes and support for version detection from nix files
|
||||
- Change push behavior from opt-out to opt-in with GitHub token authentication and automatic repository detection
|
||||
- Enhance changelog generation to avoid duplicate commit entries by extracting PR numbers and filtering commits already included via PR files
|
||||
- Add version parameter requirement for PR processing with commit SHA tracking to prevent duplicate entries and improve formatting consistency
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Docs: Update CHANGELOG after v1.4.261
|
||||
|
||||
## v1.4.261 (2025-07-19)
|
||||
|
||||
### PR [#1637](https://github.com/danielmiessler/Fabric/pull/1637) by [ksylvan](https://github.com/ksylvan): chore: update `NeedsRawMode` to include `mistral` prefix for Ollama
|
||||
|
||||
- Updated `NeedsRawMode` to include `mistral` prefix for Ollama compatibility
|
||||
- Added `mistral` to `ollamaPrefixes` list for improved model support
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Updated CHANGELOG after v1.4.260 release
|
||||
|
||||
## v1.4.260 (2025-07-18)
|
||||
|
||||
### PR [#1634](https://github.com/danielmiessler/Fabric/pull/1634) by [ksylvan](https://github.com/ksylvan): Fix abort in Exo-Labs provider plugin; with credit to @sakithahSenid
|
||||
|
||||
- Fix abort issue in Exo-Labs provider plugin
|
||||
- Add API key setup question to Exolab AI plugin configuration
|
||||
- Include API key setup question in Exolab client with required field validation
|
||||
- Add "openaiapi" to VSCode spell check dictionary
|
||||
- Maintain existing API base URL configuration order
|
||||
|
||||
### Direct commits
|
||||
|
||||
- Update CHANGELOG after v1.4.259
|
||||
|
||||
## v1.4.259 (2025-07-18)
|
||||
|
||||
### PR [#1633](https://github.com/danielmiessler/Fabric/pull/1633) by [ksylvan](https://github.com/ksylvan): YouTube VTT Processing Enhancement
|
||||
|
||||
- Fix: prevent duplicate segments in VTT file processing by adding deduplication map to track seen segments
|
||||
- Feat: enhance VTT duplicate filtering to allow legitimate repeated content with configurable time gap detection
|
||||
- Feat: improve timestamp parsing to handle fractional seconds and optional seconds/milliseconds formats
|
||||
- Chore: refactor timestamp regex to global scope and improve performance by avoiding repeated compilation
|
||||
- Fix: YouTube VTT parsing gap test and extract seconds parsing logic into reusable function
|
||||
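The deduplication described above, dropping a subtitle segment only when the same text reappears within a short time gap, can be sketched as follows (the segment type and the 10-second threshold are illustrative assumptions; Fabric's real logic is in its YouTube VTT handling):

```go
package main

import (
	"fmt"
	"time"
)

// segment is an illustrative stand-in for a parsed VTT cue.
type segment struct {
	Text  string
	Start time.Duration
}

// dedupeSegments remembers when each text was last seen and drops a repeat
// only if it occurs within maxGap, so legitimately repeated lines that are
// far apart are preserved.
func dedupeSegments(in []segment, maxGap time.Duration) []segment {
	lastSeen := make(map[string]time.Duration)
	var out []segment
	for _, s := range in {
		if prev, ok := lastSeen[s.Text]; ok && s.Start-prev <= maxGap {
			continue // duplicate caption emitted by overlapping cues
		}
		lastSeen[s.Text] = s.Start
		out = append(out, s)
	}
	return out
}

func main() {
	segs := []segment{
		{"hello world", 1 * time.Second},
		{"hello world", 2 * time.Second},  // near-duplicate, dropped
		{"hello world", 90 * time.Second}, // legitimate repeat, kept
	}
	fmt.Println(len(dedupeSegments(segs, 10*time.Second))) // 2
}
```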
|
||||
### Direct commits
|
||||
|
||||
- Docs: Update CHANGELOG after v1.4.258
|
||||
|
||||
## v1.4.258 (2025-07-17)
|
||||
|
||||
### PR [#1629](https://github.com/danielmiessler/Fabric/pull/1629) by [ksylvan](https://github.com/ksylvan): Create Default (empty) .env in ~/.config/fabric on Demand
|
||||
|
||||
README.md (85 changed lines)
@@ -47,6 +47,52 @@ It's all really exciting and powerful, but _it's not easy to integrate this func
|
||||
|
||||
Fabric organizes prompts by real-world task, allowing people to create, collect, and organize their most important AI solutions in a single place for use in their favorite tools. And if you're command-line focused, you can use Fabric itself as the interface!
|
||||
|
||||
## Updates
|
||||
|
||||
Dear Users,
|
||||
|
||||
We've been doing so many exciting things here at Fabric, I wanted to give a quick summary here to give you a sense of our development velocity!
|
||||
|
||||
Below are the **new features and capabilities** we've added (newest first):
|
||||
|
||||
### Recent Major Features
|
||||
|
||||
- [v1.4.287](https://github.com/danielmiessler/fabric/releases/tag/v1.4.287) (Aug 16, 2025) — **AI Reasoning**: Add Thinking to Gemini models and introduce `readme_updates` python script
|
||||
- [v1.4.286](https://github.com/danielmiessler/fabric/releases/tag/v1.4.286) (Aug 14, 2025) — **AI Reasoning**: Introduce Thinking Config Across Anthropic and OpenAI Providers
|
||||
- [v1.4.285](https://github.com/danielmiessler/fabric/releases/tag/v1.4.285) (Aug 13, 2025) — **Extended Context**: Enable One Million Token Context Beta Feature for Sonnet-4
|
||||
- [v1.4.284](https://github.com/danielmiessler/fabric/releases/tag/v1.4.284) (Aug 12, 2025) — **Easy Shell Completions Setup**: Introduce One-Liner Curl Install for Completions
|
||||
- [v1.4.283](https://github.com/danielmiessler/fabric/releases/tag/v1.4.283) (Aug 12, 2025) — **Model Management**: Add Vendor Selection Support for Models
|
||||
- [v1.4.282](https://github.com/danielmiessler/fabric/releases/tag/v1.4.282) (Aug 11, 2025) — **Enhanced Shell Completions**: Enhanced Shell Completions for Fabric CLI Binaries
|
||||
- [v1.4.281](https://github.com/danielmiessler/fabric/releases/tag/v1.4.281) (Aug 11, 2025) — **Gemini Search Tool**: Add Web Search Tool Support for Gemini Models
|
||||
- [v1.4.278](https://github.com/danielmiessler/fabric/releases/tag/v1.4.278) (Aug 9, 2025) — **Enhance YouTube Transcripts**: Enhance YouTube Support with Custom yt-dlp Arguments
|
||||
- [v1.4.277](https://github.com/danielmiessler/fabric/releases/tag/v1.4.277) (Aug 8, 2025) — **Desktop Notifications**: Add cross-platform desktop notifications to Fabric CLI
|
||||
- [v1.4.274](https://github.com/danielmiessler/fabric/releases/tag/v1.4.274) (Aug 7, 2025) — **Claude 4.1 Added**: Add Support for Claude Opus 4.1 Model
|
||||
- [v1.4.271](https://github.com/danielmiessler/fabric/releases/tag/v1.4.271) (Jul 28, 2025) — **AI Summarized Release Notes**: Enable AI summary updates for GitHub releases
|
||||
- [v1.4.268](https://github.com/danielmiessler/fabric/releases/tag/v1.4.268) (Jul 26, 2025) — **Gemini TTS Voice Selection**: Add Gemini TTS voice selection and listing functionality
|
||||
- [v1.4.267](https://github.com/danielmiessler/fabric/releases/tag/v1.4.267) (Jul 26, 2025) — **Text-to-Speech**: Update Gemini Plugin to New SDK with TTS Support
|
||||
- [v1.4.258](https://github.com/danielmiessler/fabric/releases/tag/v1.4.258) (Jul 17, 2025) — **Onboarding Improved**: Add startup check to initialize config and .env file automatically
|
||||
- [v1.4.257](https://github.com/danielmiessler/fabric/releases/tag/v1.4.257) (Jul 17, 2025) — **OpenAI Routing Control**: Introduce CLI Flag to Disable OpenAI Responses API
|
||||
- [v1.4.252](https://github.com/danielmiessler/fabric/releases/tag/v1.4.252) (Jul 16, 2025) — **Hide Thinking Block**: Optional Hiding of Model Thinking Process with Configurable Tags
|
||||
- [v1.4.246](https://github.com/danielmiessler/fabric/releases/tag/v1.4.246) (Jul 14, 2025) — **Automatic ChangeLog Updates**: Add AI-powered changelog generation with high-performance Go tool and comprehensive caching
|
||||
- [v1.4.245](https://github.com/danielmiessler/fabric/releases/tag/v1.4.245) (Jul 11, 2025) — **Together AI**: Together AI Support with OpenAI Fallback Mechanism Added
|
||||
- [v1.4.232](https://github.com/danielmiessler/fabric/releases/tag/v1.4.232) (Jul 6, 2025) — **Add Custom**: Add Custom Patterns Directory Support
|
||||
- [v1.4.231](https://github.com/danielmiessler/fabric/releases/tag/v1.4.231) (Jul 5, 2025) — **OAuth Auto-Auth**: OAuth Authentication Support for Anthropic (Use your Max Subscription)
|
||||
- [v1.4.230](https://github.com/danielmiessler/fabric/releases/tag/v1.4.230) (Jul 5, 2025) — **Model Management**: Add advanced image generation parameters for OpenAI models with four new CLI flags
|
||||
- [v1.4.227](https://github.com/danielmiessler/fabric/releases/tag/v1.4.227) (Jul 4, 2025) — **Add Image**: Add Image Generation Support to Fabric
|
||||
- [v1.4.226](https://github.com/danielmiessler/fabric/releases/tag/v1.4.226) (Jul 4, 2025) — **Web Search**: OpenAI Plugin Now Supports Web Search Functionality
|
||||
- [v1.4.225](https://github.com/danielmiessler/fabric/releases/tag/v1.4.225) (Jul 4, 2025) — **Web Search**: Runtime Web Search Control via Command-Line `--search` Flag
|
||||
- [v1.4.224](https://github.com/danielmiessler/fabric/releases/tag/v1.4.224) (Jul 1, 2025) — **Add code_review**: Add code_review pattern and updates in Pattern_Descriptions
|
||||
- [v1.4.222](https://github.com/danielmiessler/fabric/releases/tag/v1.4.222) (Jul 1, 2025) — **OpenAI Plugin**: OpenAI Plugin Migrates to New Responses API
|
||||
- [v1.4.218](https://github.com/danielmiessler/fabric/releases/tag/v1.4.218) (Jun 27, 2025) — **Model Management**: Add Support for OpenAI Search and Research Model Variants
|
||||
- [v1.4.217](https://github.com/danielmiessler/fabric/releases/tag/v1.4.217) (Jun 26, 2025) — **New YouTube**: New YouTube Transcript Endpoint Added to REST API
|
||||
- [v1.4.212](https://github.com/danielmiessler/fabric/releases/tag/v1.4.212) (Jun 23, 2025) — **Add Langdock**: Add Langdock AI and enhance generic OpenAI compatible support
|
||||
- [v1.4.211](https://github.com/danielmiessler/fabric/releases/tag/v1.4.211) (Jun 19, 2025) — **REST API**: REST API and Web UI Now Support Dynamic Pattern Variables
|
||||
- [v1.4.210](https://github.com/danielmiessler/fabric/releases/tag/v1.4.210) (Jun 18, 2025) — **Add Citations**: Add Citation Support to Perplexity Response
|
||||
- [v1.4.208](https://github.com/danielmiessler/fabric/releases/tag/v1.4.208) (Jun 17, 2025) — **Add Perplexity**: Add Perplexity AI Provider with Token Limits Support
|
||||
- [v1.4.203](https://github.com/danielmiessler/fabric/releases/tag/v1.4.203) (Jun 14, 2025) — **Add Amazon Bedrock**: Add support for Amazon Bedrock
|
||||
|
||||
These features represent our commitment to making Fabric the most powerful and flexible AI augmentation framework available!
|
||||
|
||||
## Intro videos
|
||||
|
||||
Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.
|
||||
@@ -60,9 +106,11 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
|
||||
- [`fabric`](#fabric)
|
||||
- [What and why](#what-and-why)
|
||||
- [Updates](#updates)
|
||||
- [Recent Major Features](#recent-major-features)
|
||||
- [Intro videos](#intro-videos)
|
||||
- [Navigation](#navigation)
|
||||
- [Updates](#updates)
|
||||
- [Changelog](#changelog)
|
||||
- [Philosophy](#philosophy)
|
||||
- [Breaking problems into components](#breaking-problems-into-components)
|
||||
- [Too many prompts](#too-many-prompts)
|
||||
@@ -84,6 +132,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
- [Migration](#migration)
|
||||
- [Upgrading](#upgrading)
|
||||
- [Shell Completions](#shell-completions)
|
||||
- [Quick install (no clone required)](#quick-install-no-clone-required)
|
||||
- [Zsh Completion](#zsh-completion)
|
||||
- [Bash Completion](#bash-completion)
|
||||
- [Fish Completion](#fish-completion)
|
||||
@@ -111,7 +160,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
|
||||
|
||||
<br />
|
||||
|
||||
## Updates
|
||||
## Changelog
|
||||
|
||||
Fabric is evolving rapidly.
|
||||
|
||||
@@ -428,6 +477,25 @@ Fabric provides shell completion scripts for Zsh, Bash, and Fish
|
||||
shells, making it easier to use the CLI by providing tab completion
|
||||
for commands and options.
|
||||
|
||||
#### Quick install (no clone required)
|
||||
|
||||
You can install completions directly via a one-liner:
|
||||
|
||||
```bash
|
||||
curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh | sh
|
||||
```
|
||||
|
||||
Optional variants:
|
||||
|
||||
```bash
|
||||
# Dry-run (see actions without changing your system)
|
||||
curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh | sh -s -- --dry-run
|
||||
|
||||
# Override the download source (advanced)
|
||||
FABRIC_COMPLETIONS_BASE_URL="https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions" \
|
||||
sh -c "$(curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh)"
|
||||
```
|
||||
|
||||
#### Zsh Completion
|
||||
|
||||
To enable Zsh completion:
|
||||
@@ -498,6 +566,7 @@ Application Options:
|
||||
-U, --updatepatterns Update patterns
|
||||
-c, --copy Copy to clipboard
|
||||
-m, --model= Choose model
|
||||
-V, --vendor= Specify vendor for chosen model (e.g., -V "LM Studio" -m openai/gpt-oss-20b)
|
||||
--modelContextLength= Model context length (only affects ollama)
|
||||
-o, --output= Output to file
|
||||
--output-session Output the entire session (also a temporary one) to the output file
|
||||
@@ -536,7 +605,7 @@ Application Options:
|
||||
--liststrategies List all strategies
|
||||
--listvendors List all vendors
|
||||
--shell-complete-list Output raw list without headers/formatting (for shell completion)
|
||||
--search Enable web search tool for supported models (Anthropic, OpenAI)
|
||||
--search Enable web search tool for supported models (Anthropic, OpenAI, Gemini)
|
||||
--search-location= Set location for web search results (e.g., 'America/Los_Angeles')
|
||||
--image-file= Save generated image to specified file path (e.g., 'output.png')
|
||||
--image-size= Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)
|
||||
@@ -548,7 +617,15 @@ Application Options:
|
||||
--think-start-tag= Start tag for thinking sections (default: <think>)
|
||||
--think-end-tag= End tag for thinking sections (default: </think>)
|
||||
--disable-responses-api Disable OpenAI Responses API (default: false)
|
||||
|
||||
--voice= TTS voice name for supported models (e.g., Kore, Charon, Puck)
|
||||
(default: Kore)
|
||||
--list-gemini-voices List all available Gemini TTS voices
|
||||
--notification Send desktop notification when command completes
|
||||
--notification-command= Custom command to run for notifications (overrides built-in
|
||||
notifications)
|
||||
--yt-dlp-args= Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')
|
||||
--thinking= Set reasoning/thinking level (e.g., off, low, medium, high, or
|
||||
numeric tokens for Anthropic or Google Gemini)
|
||||
Help Options:
|
||||
-h, --help Show this help message
|
||||
```
|
||||
|
||||
@@ -1,3 +1,3 @@
package main

var version = "v1.4.259"
var version = "v1.4.287"
@@ -101,6 +101,7 @@ generate_changelog --cache /path/to/cache.db
| `--force-pr-sync` | | Force a full PR sync from GitHub | false |
| `--token` | | GitHub API token | `$GITHUB_TOKEN` |
| `--ai-summarize` | | Generate AI-enhanced summaries using Fabric | false |
| `--release` | | Update GitHub release description with AI summary for version | |

## Output Format
Binary file not shown.
cmd/generate_changelog/internal/cache/cache.go (vendored, 30 changed lines)
@@ -4,6 +4,7 @@ import (
"database/sql"
"encoding/json"
"fmt"
"os"
"time"

"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/git"
@@ -201,7 +202,14 @@ func (c *Cache) GetVersions() (map[string]*git.Version, error) {
}

if dateStr.Valid {
v.Date, _ = time.Parse(time.RFC3339, dateStr.String)
// Try RFC3339Nano first (for nanosecond precision), then fall back to RFC3339
v.Date, err = time.Parse(time.RFC3339Nano, dateStr.String)
if err != nil {
v.Date, err = time.Parse(time.RFC3339, dateStr.String)
if err != nil {
fmt.Fprintf(os.Stderr, "Error parsing date '%s' for version '%s': %v. Expected format: RFC3339 or RFC3339Nano.\n", dateStr.String, v.Name, err)
}
}
}

if prNumbersJSON != "" {
@@ -260,6 +268,26 @@ func (c *Cache) Clear() error {
return nil
}

// VersionExists checks if a version already exists in the cache
func (c *Cache) VersionExists(version string) (bool, error) {
var count int
err := c.db.QueryRow("SELECT COUNT(*) FROM versions WHERE name = ?", version).Scan(&count)
if err != nil {
return false, err
}
return count > 0, nil
}

// CommitExists checks if a commit already exists in the cache
func (c *Cache) CommitExists(hash string) (bool, error) {
var count int
err := c.db.QueryRow("SELECT COUNT(*) FROM commits WHERE sha = ?", hash).Scan(&count)
if err != nil {
return false, err
}
return count > 0, nil
}

// GetLastPRSync returns the timestamp of the last PR sync
func (c *Cache) GetLastPRSync() (time.Time, error) {
var timestamp string
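The fallback parse added above is easy to exercise on its own. A small standalone example of the same try-RFC3339Nano-then-RFC3339 pattern (the helper name is illustrative; `cache.go` performs this inline while scanning rows):

```go
package main

import (
	"fmt"
	"time"
)

// parseVersionDate tries RFC3339Nano first (nanosecond precision, as written
// by newer tooling) and then plain RFC3339, matching the order used in cache.go.
func parseVersionDate(s string) (time.Time, error) {
	if t, err := time.Parse(time.RFC3339Nano, s); err == nil {
		return t, nil
	}
	return time.Parse(time.RFC3339, s)
}

func main() {
	for _, s := range []string{
		"2025-08-14T12:34:56.789123456Z", // nanosecond precision
		"2025-08-14T12:34:56Z",           // plain RFC3339
	} {
		t, err := parseVersionDate(s)
		fmt.Println(t, err)
	}
}
```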
@@ -65,7 +65,7 @@ func (g *Generator) Generate() (string, error) {
return "", fmt.Errorf("failed to collect data: %w", err)
}

if err := g.fetchPRs(); err != nil {
if err := g.fetchPRs(g.cfg.ForcePRSync); err != nil {
return "", fmt.Errorf("failed to fetch PRs: %w", err)
}

@@ -193,7 +193,7 @@ func (g *Generator) collectData() error {
return nil
}

func (g *Generator) fetchPRs() error {
func (g *Generator) fetchPRs(forcePRSync bool) error {
// First, load all cached PRs
if g.cache != nil {
cachedPRs, err := g.cache.GetAllPRs()
@@ -229,7 +229,7 @@ func (g *Generator) fetchPRs() error {
}
// If we have never synced or it's been more than 24 hours, do a full sync
// Also sync if we have versions with PR numbers that aren't cached
needsSync := lastSync.IsZero() || time.Since(lastSync) > 24*time.Hour || g.cfg.ForcePRSync || missingPRs
needsSync := lastSync.IsZero() || time.Since(lastSync) > 24*time.Hour || forcePRSync || missingPRs

if !needsSync {
fmt.Fprintf(os.Stderr, "Using cached PR data (last sync: %s)\n", lastSync.Format("2006-01-02 15:04:05"))
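The `needsSync` condition above combines four triggers. Pulled out into a standalone helper it reads like this (a sketch; the real code keeps the expression inline in `fetchPRs`):

```go
package main

import (
	"fmt"
	"time"
)

// needsPRSync mirrors the condition in fetchPRs: sync when there has never
// been a sync, when the last sync is older than 24 hours, when the caller
// forces a sync, or when some versions reference PRs that are not cached.
func needsPRSync(lastSync time.Time, forcePRSync, missingPRs bool) bool {
	return lastSync.IsZero() ||
		time.Since(lastSync) > 24*time.Hour ||
		forcePRSync ||
		missingPRs
}

func main() {
	fresh := time.Now().Add(-1 * time.Hour)
	stale := time.Now().Add(-48 * time.Hour)
	fmt.Println(needsPRSync(fresh, false, false)) // false: cache is fresh
	fmt.Println(needsPRSync(stale, false, false)) // true: older than 24 hours
	fmt.Println(needsPRSync(fresh, true, false))  // true: forced by --force-pr-sync
}
```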
@@ -697,3 +697,109 @@ func hashContent(content string) string {
|
||||
hash := sha256.Sum256([]byte(content))
|
||||
return fmt.Sprintf("%x", hash)
|
||||
}
|
||||
|
||||
// SyncDatabase performs a comprehensive database synchronization and validation
|
||||
func (g *Generator) SyncDatabase() error {
|
||||
if g.cache == nil {
|
||||
return fmt.Errorf("cache is disabled, cannot sync database")
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "[SYNC] Starting database synchronization...\n")
|
||||
|
||||
// Step 1: Force PR sync (pass true explicitly)
|
||||
fmt.Fprintf(os.Stderr, "[PR_SYNC] Forcing PR sync from GitHub...\n")
|
||||
if err := g.fetchPRs(true); err != nil {
|
||||
return fmt.Errorf("failed to sync PRs: %w", err)
|
||||
}
|
||||
|
||||
// Step 2: Rebuild git history and verify versions/commits completeness
|
||||
fmt.Fprintf(os.Stderr, "[VERIFY] Verifying git history and version completeness...\n")
|
||||
if err := g.syncGitHistory(); err != nil {
|
||||
return fmt.Errorf("failed to sync git history: %w", err)
|
||||
}
|
||||
|
||||
// Step 3: Verify commit-PR mappings
|
||||
fmt.Fprintf(os.Stderr, "[MAPPING] Verifying commit-PR mappings...\n")
|
||||
if err := g.verifyCommitPRMappings(); err != nil {
|
||||
return fmt.Errorf("failed to verify commit-PR mappings: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, "[SUCCESS] Database synchronization completed successfully!\n")
|
||||
return nil
|
||||
}
|
||||
|
||||
// syncGitHistory walks the complete git history and ensures all versions and commits are cached
|
||||
func (g *Generator) syncGitHistory() error {
|
||||
// Walk complete git history (reuse existing logic)
|
||||
versions, err := g.gitWalker.WalkHistory()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to walk git history: %w", err)
|
||||
}
|
||||
|
||||
// Save only new versions and commits (preserve existing data)
|
||||
var newVersions, newCommits int
|
||||
for _, version := range versions {
|
||||
// Only save version if it doesn't exist
|
||||
exists, err := g.cache.VersionExists(version.Name)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to check existence of version %s: %v. This may affect the completeness of the sync operation.\n", version.Name, err)
|
||||
continue
|
||||
}
|
||||
if !exists {
|
||||
if err := g.cache.SaveVersion(version); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to save version %s: %v\n", version.Name, err)
|
||||
} else {
|
||||
newVersions++
|
||||
}
|
||||
}
|
||||
|
||||
// Only save commits that don't exist
|
||||
for _, commit := range version.Commits {
|
||||
exists, err := g.cache.CommitExists(commit.SHA)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to check commit %s existence: %v\n", commit.SHA, err)
|
||||
continue
|
||||
}
|
||||
if !exists {
|
||||
if err := g.cache.SaveCommit(commit, version.Name); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to save commit %s: %v\n", commit.SHA, err)
|
||||
} else {
|
||||
newCommits++
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Update last processed tag
|
||||
if latestTag, err := g.gitWalker.GetLatestTag(); err == nil && latestTag != "" {
|
||||
if err := g.cache.SetLastProcessedTag(latestTag); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to update last processed tag: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, " Added %d new versions and %d new commits (preserved existing data)\n", newVersions, newCommits)
|
||||
return nil
|
||||
}
|
||||
|
||||
// verifyCommitPRMappings ensures all PR commits have proper mappings
|
||||
func (g *Generator) verifyCommitPRMappings() error {
|
||||
// Get all cached PRs
|
||||
allPRs, err := g.cache.GetAllPRs()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get cached PRs: %w", err)
|
||||
}
|
||||
|
||||
// Convert to slice for batch operations (reuse existing logic)
|
||||
var prSlice []*github.PR
|
||||
for _, pr := range allPRs {
|
||||
prSlice = append(prSlice, pr)
|
||||
}
|
||||
|
||||
// Save commit-PR mappings (reuse existing logic)
|
||||
if err := g.cache.SaveCommitPRMappings(prSlice); err != nil {
|
||||
return fmt.Errorf("failed to save commit-PR mappings: %w", err)
|
||||
}
|
||||
|
||||
fmt.Fprintf(os.Stderr, " Verified mappings for %d PRs\n", len(prSlice))
|
||||
return nil
|
||||
}
|
||||
|
||||
cmd/generate_changelog/internal/changelog/generator_test.go (new file, 115 lines)
@@ -0,0 +1,115 @@
|
||||
package changelog
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"testing"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"
|
||||
)
|
||||
|
||||
func TestDetectVersionFromNix(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
t.Run("version.nix exists", func(t *testing.T) {
|
||||
versionNixContent := `"1.2.3"`
|
||||
versionNixPath := filepath.Join(tempDir, "version.nix")
|
||||
err := os.WriteFile(versionNixPath, []byte(versionNixContent), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write version.nix: %v", err)
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(versionNixPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read version.nix: %v", err)
|
||||
}
|
||||
|
||||
versionRegex := regexp.MustCompile(`"([^"]+)"`)
|
||||
matches := versionRegex.FindStringSubmatch(string(data))
|
||||
|
||||
if len(matches) <= 1 {
|
||||
t.Fatalf("No version found in version.nix")
|
||||
}
|
||||
|
||||
version := matches[1]
|
||||
if version != "1.2.3" {
|
||||
t.Errorf("Expected version 1.2.3, got %s", version)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
func TestEnsureIncomingDir(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
incomingDir := filepath.Join(tempDir, "incoming")
|
||||
|
||||
cfg := &config.Config{
|
||||
IncomingDir: incomingDir,
|
||||
}
|
||||
|
||||
g := &Generator{cfg: cfg}
|
||||
|
||||
err := g.ensureIncomingDir()
|
||||
if err != nil {
|
||||
t.Fatalf("ensureIncomingDir failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(incomingDir); os.IsNotExist(err) {
|
||||
t.Errorf("Incoming directory was not created")
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertVersionAtTop(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
changelogPath := filepath.Join(tempDir, "CHANGELOG.md")
|
||||
|
||||
cfg := &config.Config{
|
||||
RepoPath: tempDir,
|
||||
}
|
||||
|
||||
g := &Generator{cfg: cfg}
|
||||
|
||||
t.Run("new changelog", func(t *testing.T) {
|
||||
entry := "## v1.0.0 (2025-01-01)\n\n- Initial release"
|
||||
|
||||
err := g.insertVersionAtTop(entry)
|
||||
if err != nil {
|
||||
t.Fatalf("insertVersionAtTop failed: %v", err)
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(changelogPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read changelog: %v", err)
|
||||
}
|
||||
|
||||
expected := "# Changelog\n\n## v1.0.0 (2025-01-01)\n\n- Initial release\n"
|
||||
if string(content) != expected {
|
||||
t.Errorf("Expected:\n%s\nGot:\n%s", expected, string(content))
|
||||
}
|
||||
})
|
||||
|
||||
t.Run("existing changelog", func(t *testing.T) {
|
||||
existingContent := "# Changelog\n\n## v0.9.0 (2024-12-01)\n\n- Previous release"
|
||||
err := os.WriteFile(changelogPath, []byte(existingContent), 0644)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to write existing changelog: %v", err)
|
||||
}
|
||||
|
||||
entry := "## v1.0.0 (2025-01-01)\n\n- New release"
|
||||
|
||||
err = g.insertVersionAtTop(entry)
|
||||
if err != nil {
|
||||
t.Fatalf("insertVersionAtTop failed: %v", err)
|
||||
}
|
||||
|
||||
content, err := os.ReadFile(changelogPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read changelog: %v", err)
|
||||
}
|
||||
|
||||
expected := "# Changelog\n\n## v1.0.0 (2025-01-01)\n\n- New release\n## v0.9.0 (2024-12-01)\n\n- Previous release"
|
||||
if string(content) != expected {
|
||||
t.Errorf("Expected:\n%s\nGot:\n%s", expected, string(content))
|
||||
}
|
||||
})
|
||||
}
|
||||
@@ -0,0 +1,82 @@
|
||||
package changelog
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/github"
|
||||
)
|
||||
|
||||
func TestIsMergeCommit(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
commit github.PRCommit
|
||||
expected bool
|
||||
}{
|
||||
{
|
||||
name: "Regular commit with single parent",
|
||||
commit: github.PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Fix bug in user authentication",
|
||||
Author: "John Doe",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456"},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
{
|
||||
name: "Merge commit with multiple parents",
|
||||
commit: github.PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Merge pull request #42 from feature/auth",
|
||||
Author: "GitHub",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456", "ghi789"},
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Merge commit detected by message pattern only",
|
||||
commit: github.PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Merge pull request #123 from user/feature-branch",
|
||||
Author: "GitHub",
|
||||
Date: time.Now(),
|
||||
Parents: []string{}, // Empty parents - fallback to message detection
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Merge branch commit pattern",
|
||||
commit: github.PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Merge branch 'feature' into main",
|
||||
Author: "Developer",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456"}, // Single parent but merge pattern
|
||||
},
|
||||
expected: true,
|
||||
},
|
||||
{
|
||||
name: "Regular commit with no merge patterns",
|
||||
commit: github.PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Add new feature for user management",
|
||||
Author: "Jane Doe",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456"},
|
||||
},
|
||||
expected: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
result := isMergeCommit(tt.commit)
|
||||
if result != tt.expected {
|
||||
t.Errorf("isMergeCommit() = %v, expected %v for commit: %s",
|
||||
result, tt.expected, tt.commit.Message)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
cmd/generate_changelog/internal/changelog/processing.go (new file, 521 lines)
@@ -0,0 +1,521 @@
|
||||
package changelog
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"regexp"
|
||||
"sort"
|
||||
"strconv"
|
||||
"strings"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/git"
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/github"
|
||||
)
|
||||
|
||||
var (
|
||||
mergePatterns []*regexp.Regexp
|
||||
mergePatternsOnce sync.Once
|
||||
)
|
||||
|
||||
// getMergePatterns returns the compiled merge patterns, initializing them lazily
|
||||
func getMergePatterns() []*regexp.Regexp {
|
||||
mergePatternsOnce.Do(func() {
|
||||
mergePatterns = []*regexp.Regexp{
|
||||
regexp.MustCompile(`^Merge pull request #\d+`), // "Merge pull request #123 from..."
|
||||
regexp.MustCompile(`^Merge branch '.*' into .*`), // "Merge branch 'feature' into main"
|
||||
regexp.MustCompile(`^Merge remote-tracking branch`), // "Merge remote-tracking branch..."
|
||||
regexp.MustCompile(`^Merge '.*' into .*`), // "Merge 'feature' into main"
|
||||
}
|
||||
})
|
||||
return mergePatterns
|
||||
}
|
||||
|
||||
// isMergeCommit determines if a commit is a merge commit based on its parents and message patterns.
|
||||
func isMergeCommit(commit github.PRCommit) bool {
|
||||
// Primary method: Check parent count (merge commits have multiple parents)
|
||||
if len(commit.Parents) > 1 {
|
||||
return true
|
||||
}
|
||||
|
||||
// Fallback method: Check commit message patterns
|
||||
mergePatterns := getMergePatterns()
|
||||
for _, pattern := range mergePatterns {
|
||||
if pattern.MatchString(commit.Message) {
|
||||
return true
|
||||
}
|
||||
}
|
||||
|
||||
return false
|
||||
}
|
||||
|
||||
// calculateVersionDate determines the version date based on the most recent commit date from the provided PRs.
|
||||
//
|
||||
// If no valid commit dates are found, the function falls back to the current time.
|
||||
// The function iterates through the provided PRs and their associated commits, comparing commit dates
|
||||
// to identify the most recent one. If a valid date is found, it is returned; otherwise, the fallback is used.
|
||||
func calculateVersionDate(fetchedPRs []*github.PR) time.Time {
|
||||
versionDate := time.Now() // fallback to current time
|
||||
if len(fetchedPRs) > 0 {
|
||||
var mostRecentCommitDate time.Time
|
||||
for _, pr := range fetchedPRs {
|
||||
for _, commit := range pr.Commits {
|
||||
if commit.Date.After(mostRecentCommitDate) {
|
||||
mostRecentCommitDate = commit.Date
|
||||
}
|
||||
}
|
||||
}
|
||||
if !mostRecentCommitDate.IsZero() {
|
||||
versionDate = mostRecentCommitDate
|
||||
}
|
||||
}
|
||||
return versionDate
|
||||
}
|
||||
|
||||
// ProcessIncomingPR processes a single PR for changelog entry creation
|
||||
func (g *Generator) ProcessIncomingPR(prNumber int) error {
|
||||
if err := g.validatePRState(prNumber); err != nil {
|
||||
return fmt.Errorf("PR validation failed: %w", err)
|
||||
}
|
||||
|
||||
if err := g.validateGitStatus(); err != nil {
|
||||
return fmt.Errorf("git status validation failed: %w", err)
|
||||
}
|
||||
|
||||
// Now fetch the full PR with commits for content generation
|
||||
pr, err := g.ghClient.GetPRWithCommits(prNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch PR %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
content := g.formatPR(pr)
|
||||
|
||||
if g.cfg.EnableAISummary {
|
||||
aiContent, err := SummarizeVersionContent(content)
|
||||
if err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: AI summarization failed: %v\n", err)
|
||||
} else if !checkForAIError(aiContent) {
|
||||
content = strings.TrimSpace(aiContent)
|
||||
}
|
||||
}
|
||||
|
||||
if err := g.ensureIncomingDir(); err != nil {
|
||||
return fmt.Errorf("failed to create incoming directory: %w", err)
|
||||
}
|
||||
|
||||
filename := filepath.Join(g.cfg.IncomingDir, fmt.Sprintf("%d.txt", prNumber))
|
||||
|
||||
// Ensure content ends with a single newline
|
||||
content = strings.TrimSpace(content) + "\n"
|
||||
|
||||
if err := os.WriteFile(filename, []byte(content), 0644); err != nil {
|
||||
return fmt.Errorf("failed to write incoming file: %w", err)
|
||||
}
|
||||
|
||||
if err := g.commitAndPushIncoming(prNumber, filename); err != nil {
|
||||
return fmt.Errorf("failed to commit and push: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully created incoming changelog entry: %s\n", filename)
|
||||
return nil
|
||||
}
|
||||
|
||||
// CreateNewChangelogEntry aggregates all incoming PR files for release and includes direct commits
|
||||
func (g *Generator) CreateNewChangelogEntry(version string) error {
|
||||
files, err := filepath.Glob(filepath.Join(g.cfg.IncomingDir, "*.txt"))
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to scan incoming directory: %w", err)
|
||||
}
|
||||
|
||||
var content strings.Builder
|
||||
var processingErrors []string
|
||||
|
||||
// First, aggregate all incoming PR files
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
processingErrors = append(processingErrors, fmt.Sprintf("failed to read %s: %v", file, err))
|
||||
continue // Continue to attempt processing other files
|
||||
}
|
||||
content.WriteString(string(data))
|
||||
// Note: No extra newline needed here as each incoming file already ends with a newline
|
||||
}
|
||||
|
||||
if len(processingErrors) > 0 {
|
||||
return fmt.Errorf("encountered errors while processing incoming files: %s", strings.Join(processingErrors, "; "))
|
||||
}
|
||||
|
||||
// Extract PR numbers and their commit SHAs from processed files to avoid including their commits as "direct"
|
||||
processedPRs := make(map[int]bool)
|
||||
processedCommitSHAs := make(map[string]bool)
|
||||
var fetchedPRs []*github.PR
|
||||
var prNumbers []int
|
||||
|
||||
for _, file := range files {
|
||||
// Extract PR number from filename (e.g., "1640.txt" -> 1640)
|
||||
filename := filepath.Base(file)
|
||||
if prNumStr := strings.TrimSuffix(filename, ".txt"); prNumStr != filename {
|
||||
if prNum, err := strconv.Atoi(prNumStr); err == nil {
|
||||
processedPRs[prNum] = true
|
||||
prNumbers = append(prNumbers, prNum)
|
||||
|
||||
// Fetch the PR to get its commit SHAs
|
||||
if pr, err := g.ghClient.GetPRWithCommits(prNum); err == nil {
|
||||
fetchedPRs = append(fetchedPRs, pr)
|
||||
for _, commit := range pr.Commits {
|
||||
processedCommitSHAs[commit.SHA] = true
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Now add direct commits since the last release, excluding commits from processed PRs
|
||||
directCommitsContent, err := g.getDirectCommitsSinceLastRelease(processedPRs, processedCommitSHAs)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get direct commits since last release: %w", err)
|
||||
}
|
||||
content.WriteString(directCommitsContent)
|
||||
|
||||
// Check if we have any content at all
|
||||
if content.Len() == 0 {
|
||||
if len(files) == 0 {
|
||||
fmt.Fprintf(os.Stderr, "No incoming PR files found in %s and no direct commits since last release\n", g.cfg.IncomingDir)
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "No content found in incoming files and no direct commits since last release\n")
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// Calculate the version date for the changelog entry as the most recent commit date from processed PRs
|
||||
versionDate := calculateVersionDate(fetchedPRs)
|
||||
|
||||
entry := fmt.Sprintf("## %s (%s)\n\n%s",
|
||||
version, versionDate.Format("2006-01-02"), strings.TrimLeft(content.String(), "\n"))
|
||||
|
||||
if err := g.insertVersionAtTop(entry); err != nil {
|
||||
return fmt.Errorf("failed to update CHANGELOG.md: %w", err)
|
||||
}
|
||||
|
||||
if g.cache != nil {
|
||||
// Cache the fetched PRs using the same logic as normal changelog generation
|
||||
if len(fetchedPRs) > 0 {
|
||||
// Save PRs to cache
|
||||
if err := g.cache.SavePRBatch(fetchedPRs); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to save PR batch to cache: %v\n", err)
|
||||
}
|
||||
|
||||
// Save SHA→PR mappings for lightning-fast git operations
|
||||
if err := g.cache.SaveCommitPRMappings(fetchedPRs); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to cache commit mappings: %v\n", err)
|
||||
}
|
||||
|
||||
// Save individual commits to cache for each PR
|
||||
for _, pr := range fetchedPRs {
|
||||
for _, commit := range pr.Commits {
|
||||
// Use actual commit timestamp, with fallback to current time if invalid
|
||||
commitDate := commit.Date
|
||||
if commitDate.IsZero() {
|
||||
commitDate = time.Now()
|
||||
fmt.Fprintf(os.Stderr, "Warning: Commit %s has invalid timestamp, using current time as fallback\n", commit.SHA)
|
||||
}
|
||||
|
||||
// Convert github.PRCommit to git.Commit
|
||||
gitCommit := &git.Commit{
|
||||
SHA: commit.SHA,
|
||||
Message: commit.Message,
|
||||
Author: commit.Author,
|
||||
Email: commit.Email, // Use email from GitHub API
|
||||
Date: commitDate, // Use actual commit timestamp from GitHub API
|
||||
IsMerge: isMergeCommit(commit), // Detect merge commits using parents and message patterns
|
||||
PRNumber: pr.Number,
|
||||
}
|
||||
if err := g.cache.SaveCommit(gitCommit, version); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to save commit %s to cache: %v\n", commit.SHA, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Create a proper new version entry for the database
|
||||
newVersionEntry := &git.Version{
|
||||
Name: version,
|
||||
Date: versionDate, // Use most recent commit date instead of current time
|
||||
CommitSHA: "", // Will be set when the release commit is made
|
||||
PRNumbers: prNumbers, // Now we have the actual PR numbers
|
||||
AISummary: content.String(),
|
||||
}
|
||||
|
||||
if err := g.cache.SaveVersion(newVersionEntry); err != nil {
|
||||
return fmt.Errorf("failed to save new version entry to database: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
for _, file := range files {
|
||||
// Convert to relative path for git operations
|
||||
relativeFile, err := filepath.Rel(g.cfg.RepoPath, file)
|
||||
if err != nil {
|
||||
relativeFile = file
|
||||
}
|
||||
|
||||
// Use git remove to handle both filesystem and git index
|
||||
if err := g.gitWalker.RemoveFile(relativeFile); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Warning: Failed to remove %s from git index: %v\n", relativeFile, err)
|
||||
// Fallback to filesystem-only removal
|
||||
if err := os.Remove(file); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: Failed to remove %s from the filesystem after failing to remove it from the git index.\n", relativeFile)
|
||||
fmt.Fprintf(os.Stderr, "Filesystem error: %v\n", err)
|
||||
fmt.Fprintf(os.Stderr, "Manual intervention required:\n")
|
||||
fmt.Fprintf(os.Stderr, " 1. Remove the file %s manually (using the OS-specific command)\n", file)
|
||||
fmt.Fprintf(os.Stderr, " 2. Remove from git index: git rm --cached %s\n", relativeFile)
|
||||
fmt.Fprintf(os.Stderr, " 3. Or reset git index: git reset HEAD %s\n", relativeFile)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if err := g.stageChangesForRelease(); err != nil {
|
||||
return fmt.Errorf("critical: failed to stage changes for release: %w", err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully processed %d incoming PR files for version %s\n", len(files), version)
|
||||
return nil
|
||||
}
|
||||
|
||||
// getDirectCommitsSinceLastRelease gets all direct commits (not part of PRs) since the last release
|
||||
func (g *Generator) getDirectCommitsSinceLastRelease(processedPRs map[int]bool, processedCommitSHAs map[string]bool) (string, error) {
|
||||
// Get the latest tag to determine what commits are unreleased
|
||||
latestTag, err := g.gitWalker.GetLatestTag()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get latest tag: %w", err)
|
||||
}
|
||||
|
||||
// Get all commits since the latest tag
|
||||
unreleasedVersion, err := g.gitWalker.WalkCommitsSinceTag(latestTag)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to walk commits since tag %s: %w", latestTag, err)
|
||||
}
|
||||
|
||||
if unreleasedVersion == nil || len(unreleasedVersion.Commits) == 0 {
|
||||
return "", nil // No unreleased commits
|
||||
}
|
||||
|
||||
// Filter out commits that are part of PRs (we already have those from incoming files)
|
||||
// and format the direct commits
|
||||
var directCommits []*git.Commit
|
||||
for _, commit := range unreleasedVersion.Commits {
|
||||
// Skip version bump commits
|
||||
if commit.IsVersion {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip commits that belong to PRs we've already processed from incoming files (by PR number)
|
||||
if commit.PRNumber > 0 && processedPRs[commit.PRNumber] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Skip commits whose SHA is already included in processed PRs (this catches commits
|
||||
// that might not have been detected as part of a PR but are actually in the PR)
|
||||
if processedCommitSHAs[commit.SHA] {
|
||||
continue
|
||||
}
|
||||
|
||||
// Only include commits that are NOT part of any PR (direct commits)
|
||||
if commit.PRNumber == 0 {
|
||||
directCommits = append(directCommits, commit)
|
||||
}
|
||||
}
|
||||
|
||||
if len(directCommits) == 0 {
|
||||
return "", nil // No direct commits
|
||||
}
|
||||
|
||||
// Format the direct commits similar to how it's done in generateRawVersionContent
|
||||
var sb strings.Builder
|
||||
sb.WriteString("### Direct commits\n\n")
|
||||
|
||||
// Sort direct commits by date (newest first) for consistent ordering
|
||||
sort.Slice(directCommits, func(i, j int) bool {
|
||||
return directCommits[i].Date.After(directCommits[j].Date)
|
||||
})
|
||||
|
||||
for _, commit := range directCommits {
|
||||
message := g.formatCommitMessage(strings.TrimSpace(commit.Message))
|
||||
if message != "" && !g.isDuplicateMessage(message, directCommits) {
|
||||
sb.WriteString(fmt.Sprintf("- %s\n", message))
|
||||
}
|
||||
}
|
||||
|
||||
return sb.String(), nil
|
||||
}
|
||||
|
||||
// validatePRState validates that a PR is in the correct state for processing
|
||||
func (g *Generator) validatePRState(prNumber int) error {
|
||||
// Use lightweight validation call that doesn't fetch commits
|
||||
details, err := g.ghClient.GetPRValidationDetails(prNumber)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to fetch PR %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
if details.State != "open" {
|
||||
return fmt.Errorf("PR %d is not open (current state: %s)", prNumber, details.State)
|
||||
}
|
||||
|
||||
if !details.Mergeable {
|
||||
return fmt.Errorf("PR %d is not mergeable - please resolve conflicts first", prNumber)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// validateGitStatus ensures the working directory is clean
|
||||
func (g *Generator) validateGitStatus() error {
|
||||
isClean, err := g.gitWalker.IsWorkingDirectoryClean()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to check git status: %w", err)
|
||||
}
|
||||
|
||||
if !isClean {
|
||||
// Get detailed status for better error message
|
||||
statusDetails, statusErr := g.gitWalker.GetStatusDetails()
|
||||
if statusErr == nil && statusDetails != "" {
|
||||
return fmt.Errorf("working directory is not clean - please commit or stash changes before proceeding:\n%s", statusDetails)
|
||||
}
|
||||
return fmt.Errorf("working directory is not clean - please commit or stash changes before proceeding")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// ensureIncomingDir creates the incoming directory if it doesn't exist
|
||||
func (g *Generator) ensureIncomingDir() error {
|
||||
if err := os.MkdirAll(g.cfg.IncomingDir, 0755); err != nil {
|
||||
return fmt.Errorf("failed to create directory %s: %w", g.cfg.IncomingDir, err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// commitAndPushIncoming commits and optionally pushes the incoming changelog file
|
||||
func (g *Generator) commitAndPushIncoming(prNumber int, filename string) error {
|
||||
relativeFilename, err := filepath.Rel(g.cfg.RepoPath, filename)
|
||||
if err != nil {
|
||||
relativeFilename = filename
|
||||
}
|
||||
|
||||
// Add file to git index
|
||||
if err := g.gitWalker.AddFile(relativeFilename); err != nil {
|
||||
return fmt.Errorf("failed to add file %s: %w", relativeFilename, err)
|
||||
}
|
||||
|
||||
// Commit changes
|
||||
commitMessage := fmt.Sprintf("chore: incoming %d changelog entry", prNumber)
|
||||
_, err = g.gitWalker.CommitChanges(commitMessage)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to commit changes: %w", err)
|
||||
}
|
||||
|
||||
// Push to remote if enabled
|
||||
if g.cfg.Push {
|
||||
if err := g.gitWalker.PushToRemote(); err != nil {
|
||||
return fmt.Errorf("failed to push to remote: %w", err)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Commit created successfully. Please review and push manually.")
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// detectVersion detects the current version from version.nix or git tags
|
||||
func (g *Generator) detectVersion() (string, error) {
|
||||
versionNixPath := filepath.Join(g.cfg.RepoPath, "version.nix")
|
||||
if _, err := os.Stat(versionNixPath); err == nil {
|
||||
data, err := os.ReadFile(versionNixPath)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to read version.nix: %w", err)
|
||||
}
|
||||
|
||||
versionRegex := regexp.MustCompile(`"([^"]+)"`)
|
||||
matches := versionRegex.FindStringSubmatch(string(data))
|
||||
if len(matches) > 1 {
|
||||
return matches[1], nil
|
||||
}
|
||||
}
|
||||
|
||||
latestTag, err := g.gitWalker.GetLatestTag()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get latest tag: %w", err)
|
||||
}
|
||||
|
||||
if latestTag == "" {
|
||||
return "v1.0.0", nil
|
||||
}
|
||||
|
||||
return latestTag, nil
|
||||
}
|
||||
|
||||
// insertVersionAtTop inserts a new version entry at the top of CHANGELOG.md
|
||||
func (g *Generator) insertVersionAtTop(entry string) error {
|
||||
changelogPath := filepath.Join(g.cfg.RepoPath, "CHANGELOG.md")
|
||||
header := "# Changelog"
|
||||
headerRegex := regexp.MustCompile(`(?m)^# Changelog\s*`)
|
||||
|
||||
existingContent, err := os.ReadFile(changelogPath)
|
||||
if err != nil {
|
||||
if !os.IsNotExist(err) {
|
||||
return fmt.Errorf("failed to read existing CHANGELOG.md: %w", err)
|
||||
}
|
||||
// File doesn't exist, create it.
|
||||
newContent := fmt.Sprintf("%s\n\n%s\n", header, entry)
|
||||
return os.WriteFile(changelogPath, []byte(newContent), 0644)
|
||||
}
|
||||
|
||||
contentStr := string(existingContent)
|
||||
var newContent string
|
||||
|
||||
if loc := headerRegex.FindStringIndex(contentStr); loc != nil {
|
||||
// Found the header, insert after it.
|
||||
insertionPoint := loc[1]
|
||||
// Skip any existing newlines after the header to avoid double spacing
|
||||
for insertionPoint < len(contentStr) && (contentStr[insertionPoint] == '\n' || contentStr[insertionPoint] == '\r') {
|
||||
insertionPoint++
|
||||
}
|
||||
// Insert with proper spacing: single newline after header, then entry, then newline before existing content
|
||||
newContent = contentStr[:loc[1]] + entry + "\n" + contentStr[insertionPoint:]
|
||||
} else {
|
||||
// Header not found, prepend everything.
|
||||
newContent = fmt.Sprintf("%s\n\n%s\n\n%s", header, entry, contentStr)
|
||||
}
|
||||
|
||||
return os.WriteFile(changelogPath, []byte(newContent), 0644)
|
||||
}
|
||||
|
||||
// stageChangesForRelease stages the modified files for the release commit
|
||||
func (g *Generator) stageChangesForRelease() error {
|
||||
changelogPath := filepath.Join(g.cfg.RepoPath, "CHANGELOG.md")
|
||||
relativeChangelog, err := filepath.Rel(g.cfg.RepoPath, changelogPath)
|
||||
if err != nil {
|
||||
relativeChangelog = "CHANGELOG.md"
|
||||
}
|
||||
|
||||
relativeCacheFile, err := filepath.Rel(g.cfg.RepoPath, g.cfg.CacheFile)
|
||||
if err != nil {
|
||||
relativeCacheFile = g.cfg.CacheFile
|
||||
}
|
||||
|
||||
// Add CHANGELOG.md to git index
|
||||
if err := g.gitWalker.AddFile(relativeChangelog); err != nil {
|
||||
return fmt.Errorf("failed to add %s: %w", relativeChangelog, err)
|
||||
}
|
||||
|
||||
// Add cache file to git index
|
||||
if err := g.gitWalker.AddFile(relativeCacheFile); err != nil {
|
||||
return fmt.Errorf("failed to add %s: %w", relativeCacheFile, err)
|
||||
}
|
||||
|
||||
// Note: Individual incoming files are now removed during the main processing loop
|
||||
// No need to remove the entire directory here
|
||||
|
||||
return nil
|
||||
}
|
||||
262
cmd/generate_changelog/internal/changelog/processing_test.go
Normal file
262
cmd/generate_changelog/internal/changelog/processing_test.go
Normal file
@@ -0,0 +1,262 @@
|
||||
package changelog
|
||||
|
||||
import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"
|
||||
)
|
||||
|
||||
func TestDetectVersion(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
versionNixContent string
|
||||
expectedVersion string
|
||||
shouldError bool
|
||||
}{
|
||||
{
|
||||
name: "valid version.nix",
|
||||
versionNixContent: `"1.2.3"`,
|
||||
expectedVersion: "1.2.3",
|
||||
shouldError: false,
|
||||
},
|
||||
{
|
||||
name: "version with extra whitespace",
|
||||
versionNixContent: `"1.2.3" `,
|
||||
expectedVersion: "1.2.3",
|
||||
shouldError: false,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Create version.nix file
|
||||
versionNixPath := filepath.Join(tempDir, "version.nix")
|
||||
if err := os.WriteFile(versionNixPath, []byte(tt.versionNixContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to create version.nix: %v", err)
|
||||
}
|
||||
|
||||
cfg := &config.Config{
|
||||
RepoPath: tempDir,
|
||||
}
|
||||
|
||||
g := &Generator{cfg: cfg}
|
||||
|
||||
version, err := g.detectVersion()
|
||||
if tt.shouldError && err == nil {
|
||||
t.Errorf("Expected error but got none")
|
||||
}
|
||||
if !tt.shouldError && err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
if version != tt.expectedVersion {
|
||||
t.Errorf("Expected version '%s', got '%s'", tt.expectedVersion, version)
|
||||
}
|
||||
|
||||
// Clean up
|
||||
os.Remove(versionNixPath)
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestInsertVersionAtTop_ImprovedRobustness(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
changelogPath := filepath.Join(tempDir, "CHANGELOG.md")
|
||||
|
||||
cfg := &config.Config{
|
||||
RepoPath: tempDir,
|
||||
}
|
||||
|
||||
g := &Generator{cfg: cfg}
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
existingContent string
|
||||
entry string
|
||||
expectedContent string
|
||||
}{
|
||||
{
|
||||
name: "header with trailing spaces",
|
||||
existingContent: "# Changelog \n\n## v1.0.0\n- Old content",
|
||||
entry: "## v2.0.0\n- New content",
|
||||
expectedContent: "# Changelog \n\n## v2.0.0\n- New content\n## v1.0.0\n- Old content",
|
||||
},
|
||||
{
|
||||
name: "header with different line endings",
|
||||
existingContent: "# Changelog\r\n\r\n## v1.0.0\r\n- Old content",
|
||||
entry: "## v2.0.0\n- New content",
|
||||
expectedContent: "# Changelog\r\n\r\n## v2.0.0\n- New content\n## v1.0.0\r\n- Old content",
|
||||
},
|
||||
{
|
||||
name: "no existing header",
|
||||
existingContent: "Some existing content without header",
|
||||
entry: "## v1.0.0\n- New content",
|
||||
expectedContent: "# Changelog\n\n## v1.0.0\n- New content\n\nSome existing content without header",
|
||||
},
|
||||
{
|
||||
name: "new file creation",
|
||||
existingContent: "",
|
||||
entry: "## v1.0.0\n- Initial release",
|
||||
expectedContent: "# Changelog\n\n## v1.0.0\n- Initial release\n",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Write existing content (or create empty file)
|
||||
if tt.existingContent != "" {
|
||||
if err := os.WriteFile(changelogPath, []byte(tt.existingContent), 0644); err != nil {
|
||||
t.Fatalf("Failed to write existing content: %v", err)
|
||||
}
|
||||
} else {
|
||||
// Remove file if it exists to test new file creation
|
||||
os.Remove(changelogPath)
|
||||
}
|
||||
|
||||
// Insert new version
|
||||
if err := g.insertVersionAtTop(tt.entry); err != nil {
|
||||
t.Fatalf("insertVersionAtTop failed: %v", err)
|
||||
}
|
||||
|
||||
// Read result
|
||||
result, err := os.ReadFile(changelogPath)
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to read result: %v", err)
|
||||
}
|
||||
|
||||
if string(result) != tt.expectedContent {
|
||||
t.Errorf("Expected:\n%q\nGot:\n%q", tt.expectedContent, string(result))
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestProcessIncomingPRs_FileAggregation(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
incomingDir := filepath.Join(tempDir, "incoming")
|
||||
|
||||
// Create incoming directory and files
|
||||
if err := os.MkdirAll(incomingDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create incoming dir: %v", err)
|
||||
}
|
||||
|
||||
// Create test incoming files
|
||||
file1Content := "## PR #1\n- Feature A"
|
||||
file2Content := "## PR #2\n- Feature B"
|
||||
|
||||
if err := os.WriteFile(filepath.Join(incomingDir, "1.txt"), []byte(file1Content), 0644); err != nil {
|
||||
t.Fatalf("Failed to create test file: %v", err)
|
||||
}
|
||||
if err := os.WriteFile(filepath.Join(incomingDir, "2.txt"), []byte(file2Content), 0644); err != nil {
|
||||
t.Fatalf("Failed to create test file: %v", err)
|
||||
}
|
||||
|
||||
// Test file aggregation logic by calling the internal functions
|
||||
files, err := filepath.Glob(filepath.Join(incomingDir, "*.txt"))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to glob files: %v", err)
|
||||
}
|
||||
|
||||
if len(files) != 2 {
|
||||
t.Fatalf("Expected 2 files, got %d", len(files))
|
||||
}
|
||||
|
||||
// Test content aggregation
|
||||
var content strings.Builder
|
||||
var processingErrors []string
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
processingErrors = append(processingErrors, err.Error())
|
||||
continue
|
||||
}
|
||||
content.WriteString(string(data))
|
||||
content.WriteString("\n")
|
||||
}
|
||||
|
||||
if len(processingErrors) > 0 {
|
||||
t.Fatalf("Unexpected processing errors: %v", processingErrors)
|
||||
}
|
||||
|
||||
aggregatedContent := content.String()
|
||||
if !strings.Contains(aggregatedContent, "Feature A") {
|
||||
t.Errorf("Aggregated content should contain 'Feature A'")
|
||||
}
|
||||
if !strings.Contains(aggregatedContent, "Feature B") {
|
||||
t.Errorf("Aggregated content should contain 'Feature B'")
|
||||
}
|
||||
}
|
||||
|
||||
func TestFileProcessing_ErrorHandling(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
incomingDir := filepath.Join(tempDir, "incoming")
|
||||
|
||||
// Create incoming directory with one good file and one unreadable file
|
||||
if err := os.MkdirAll(incomingDir, 0755); err != nil {
|
||||
t.Fatalf("Failed to create incoming dir: %v", err)
|
||||
}
|
||||
|
||||
// Create a good file
|
||||
if err := os.WriteFile(filepath.Join(incomingDir, "1.txt"), []byte("content"), 0644); err != nil {
|
||||
t.Fatalf("Failed to create test file: %v", err)
|
||||
}
|
||||
|
||||
// Create an unreadable file (simulate permission error)
|
||||
unreadableFile := filepath.Join(incomingDir, "2.txt")
|
||||
if err := os.WriteFile(unreadableFile, []byte("content"), 0000); err != nil {
|
||||
t.Fatalf("Failed to create unreadable file: %v", err)
|
||||
}
|
||||
defer os.Chmod(unreadableFile, 0644) // Clean up
|
||||
|
||||
// Test error aggregation logic
|
||||
files, err := filepath.Glob(filepath.Join(incomingDir, "*.txt"))
|
||||
if err != nil {
|
||||
t.Fatalf("Failed to glob files: %v", err)
|
||||
}
|
||||
|
||||
var content strings.Builder
|
||||
var processingErrors []string
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err != nil {
|
||||
processingErrors = append(processingErrors, err.Error())
|
||||
continue
|
||||
}
|
||||
content.WriteString(string(data))
|
||||
content.WriteString("\n")
|
||||
}
|
||||
|
||||
if len(processingErrors) == 0 {
|
||||
t.Errorf("Expected processing errors due to unreadable file")
|
||||
}
|
||||
|
||||
// Verify error message format
|
||||
errorMsg := strings.Join(processingErrors, "; ")
|
||||
if !strings.Contains(errorMsg, "2.txt") {
|
||||
t.Errorf("Error message should mention the problematic file")
|
||||
}
|
||||
}
|
||||
|
||||
func TestEnsureIncomingDirCreation(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
incomingDir := filepath.Join(tempDir, "incoming")
|
||||
|
||||
cfg := &config.Config{
|
||||
IncomingDir: incomingDir,
|
||||
}
|
||||
|
||||
g := &Generator{cfg: cfg}
|
||||
|
||||
err := g.ensureIncomingDir()
|
||||
if err != nil {
|
||||
t.Fatalf("ensureIncomingDir failed: %v", err)
|
||||
}
|
||||
|
||||
if _, err := os.Stat(incomingDir); os.IsNotExist(err) {
|
||||
t.Errorf("Incoming directory was not created")
|
||||
}
|
||||
}
|
||||
@@ -1,15 +1,21 @@
|
||||
package config
|
||||
|
||||
type Config struct {
|
||||
RepoPath string
|
||||
OutputFile string
|
||||
Limit int
|
||||
Version string
|
||||
SaveData bool
|
||||
CacheFile string
|
||||
NoCache bool
|
||||
RebuildCache bool
|
||||
GitHubToken string
|
||||
ForcePRSync bool
|
||||
EnableAISummary bool
|
||||
RepoPath string
|
||||
OutputFile string
|
||||
Limit int
|
||||
Version string
|
||||
SaveData bool
|
||||
CacheFile string
|
||||
NoCache bool
|
||||
RebuildCache bool
|
||||
GitHubToken string
|
||||
ForcePRSync bool
|
||||
EnableAISummary bool
|
||||
IncomingPR int
|
||||
ProcessPRsVersion string
|
||||
IncomingDir string
|
||||
Push bool
|
||||
SyncDB bool
|
||||
Release string
|
||||
}
|
||||
|
||||
@@ -7,14 +7,24 @@ import (
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/util"
|
||||
"github.com/go-git/go-git/v5"
|
||||
"github.com/go-git/go-git/v5/plumbing"
|
||||
"github.com/go-git/go-git/v5/plumbing/object"
|
||||
"github.com/go-git/go-git/v5/plumbing/storer"
|
||||
"github.com/go-git/go-git/v5/plumbing/transport/http"
|
||||
)
|
||||
|
||||
var (
|
||||
versionPattern = regexp.MustCompile(`Update version to (v\d+\.\d+\.\d+)`)
|
||||
// The versionPattern matches version commit messages with or without the optional "chore(release): " prefix.
|
||||
// Examples of matching commit messages:
|
||||
// - "chore(release): Update version to v1.2.3"
|
||||
// - "Update version to v1.2.3"
|
||||
// Examples of non-matching commit messages:
|
||||
// - "fix: Update version to v1.2.3" (missing "chore(release): " or "Update version to")
|
||||
// - "chore(release): Update version to 1.2.3" (missing "v" prefix in version)
|
||||
// - "Update version to v1.2" (incomplete version number)
|
||||
versionPattern = regexp.MustCompile(`(?:chore\(release\): )?Update version to (v\d+\.\d+\.\d+)`)
|
||||
prPattern = regexp.MustCompile(`Merge pull request #(\d+)`)
|
||||
)
|
||||
|
||||
@@ -400,3 +410,165 @@ func dedupInts(ints []int) []int {
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// Worktree returns the git worktree for performing git operations
|
||||
func (w *Walker) Worktree() (*git.Worktree, error) {
|
||||
return w.repo.Worktree()
|
||||
}
|
||||
|
||||
// Repository returns the underlying git repository
|
||||
func (w *Walker) Repository() *git.Repository {
|
||||
return w.repo
|
||||
}
|
||||
|
||||
// IsWorkingDirectoryClean checks if the working directory has any uncommitted changes
|
||||
func (w *Walker) IsWorkingDirectoryClean() (bool, error) {
|
||||
worktree, err := w.repo.Worktree()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get worktree: %w", err)
|
||||
}
|
||||
|
||||
status, err := worktree.Status()
|
||||
if err != nil {
|
||||
return false, fmt.Errorf("failed to get git status: %w", err)
|
||||
}
|
||||
|
||||
return status.IsClean(), nil
|
||||
}
|
||||
|
||||
// GetStatusDetails returns a detailed status of the working directory
|
||||
func (w *Walker) GetStatusDetails() (string, error) {
|
||||
worktree, err := w.repo.Worktree()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get worktree: %w", err)
|
||||
}
|
||||
|
||||
status, err := worktree.Status()
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to get git status: %w", err)
|
||||
}
|
||||
|
||||
if status.IsClean() {
|
||||
return "", nil
|
||||
}
|
||||
|
||||
var details strings.Builder
|
||||
for file, fileStatus := range status {
|
||||
details.WriteString(fmt.Sprintf(" %c%c %s\n", fileStatus.Staging, fileStatus.Worktree, file))
|
||||
}
|
||||
|
||||
return details.String(), nil
|
||||
}
|
||||
|
||||
// AddFile adds a file to the git index
|
||||
func (w *Walker) AddFile(filename string) error {
|
||||
worktree, err := w.repo.Worktree()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get worktree: %w", err)
|
||||
}
|
||||
|
||||
_, err = worktree.Add(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to add file %s: %w", filename, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
// CommitChanges creates a commit with the given message
|
||||
func (w *Walker) CommitChanges(message string) (plumbing.Hash, error) {
|
||||
worktree, err := w.repo.Worktree()
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, fmt.Errorf("failed to get worktree: %w", err)
|
||||
}
|
||||
|
||||
// Get git config for author information
|
||||
cfg, err := w.repo.Config()
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, fmt.Errorf("failed to get git config: %w", err)
|
||||
}
|
||||
|
||||
var authorName, authorEmail string
|
||||
if cfg.User.Name != "" {
|
||||
authorName = cfg.User.Name
|
||||
} else {
|
||||
authorName = "Changelog Bot"
|
||||
}
|
||||
if cfg.User.Email != "" {
|
||||
authorEmail = cfg.User.Email
|
||||
} else {
|
||||
authorEmail = "bot@changelog.local"
|
||||
}
|
||||
|
||||
commit, err := worktree.Commit(message, &git.CommitOptions{
|
||||
Author: &object.Signature{
|
||||
Name: authorName,
|
||||
Email: authorEmail,
|
||||
When: time.Now(),
|
||||
},
|
||||
})
|
||||
if err != nil {
|
||||
return plumbing.ZeroHash, fmt.Errorf("failed to commit: %w", err)
|
||||
}
|
||||
|
||||
return commit, nil
|
||||
}
|
||||
|
||||
// PushToRemote pushes the current branch to the remote repository
|
||||
// It automatically detects GitHub repositories and uses token authentication when available
|
||||
func (w *Walker) PushToRemote() error {
|
||||
pushOptions := &git.PushOptions{}
|
||||
|
||||
// Check if we have a GitHub token for authentication
|
||||
if githubToken := util.GetTokenFromEnv(""); githubToken != "" {
|
||||
// Get remote URL to check if it's a GitHub repository
|
||||
remotes, err := w.repo.Remotes()
|
||||
if err == nil && len(remotes) > 0 {
|
||||
// Get the origin remote (or first remote if origin doesn't exist)
|
||||
var remote *git.Remote
|
||||
for _, r := range remotes {
|
||||
if r.Config().Name == "origin" {
|
||||
remote = r
|
||||
break
|
||||
}
|
||||
}
|
||||
if remote == nil {
|
||||
remote = remotes[0]
|
||||
}
|
||||
|
||||
// Check if this is a GitHub repository
|
||||
urls := remote.Config().URLs
|
||||
if len(urls) > 0 {
|
||||
url := urls[0]
|
||||
if strings.Contains(url, "github.com") {
|
||||
// Use token authentication for GitHub repositories
|
||||
pushOptions.Auth = &http.BasicAuth{
|
||||
Username: "token", // GitHub expects "token" as username
|
||||
Password: githubToken,
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
err := w.repo.Push(pushOptions)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to push: %w", err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// RemoveFile removes a file from both the working directory and git index
|
||||
func (w *Walker) RemoveFile(filename string) error {
|
||||
worktree, err := w.repo.Worktree()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get worktree: %w", err)
|
||||
}
|
||||
|
||||
_, err = worktree.Remove(filename)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to remove file %s: %w", filename, err)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
@@ -100,35 +100,89 @@ func (c *Client) FetchPRs(prNumbers []int) ([]*PR, error) {
|
||||
return prs, nil
|
||||
}
|
||||
|
||||
func (c *Client) fetchSinglePR(ctx context.Context, prNumber int) (*PR, error) {
|
||||
pr, _, err := c.client.PullRequests.Get(ctx, c.owner, c.repo, prNumber)
|
||||
// GetPRValidationDetails fetches only the data needed for validation (lightweight).
|
||||
func (c *Client) GetPRValidationDetails(prNumber int) (*PRDetails, error) {
|
||||
ctx := context.Background()
|
||||
ghPR, _, err := c.client.PullRequests.Get(ctx, c.owner, c.repo, prNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
return nil, fmt.Errorf("failed to get PR %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
commits, _, err := c.client.PullRequests.ListCommits(ctx, c.owner, c.repo, prNumber, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch commits: %w", err)
|
||||
// Only return validation data, no commits fetched
|
||||
details := &PRDetails{
|
||||
PR: nil, // Will be populated later if needed
|
||||
State: getString(ghPR.State),
|
||||
Mergeable: ghPR.Mergeable != nil && *ghPR.Mergeable,
|
||||
}
|
||||
|
||||
return details, nil
|
||||
}
|
||||
|
||||
// GetPRWithCommits fetches the full PR and its commits.
|
||||
func (c *Client) GetPRWithCommits(prNumber int) (*PR, error) {
|
||||
ctx := context.Background()
|
||||
ghPR, _, err := c.client.PullRequests.Get(ctx, c.owner, c.repo, prNumber)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get PR %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
return c.buildPRWithCommits(ctx, ghPR)
|
||||
}
|
||||
|
||||
// GetPRDetails fetches a comprehensive set of details for a single PR.
|
||||
// Deprecated: Use GetPRValidationDetails + GetPRWithCommits for better performance
|
||||
func (c *Client) GetPRDetails(prNumber int) (*PRDetails, error) {
|
||||
ctx := context.Background()
|
||||
ghPR, _, err := c.client.PullRequests.Get(ctx, c.owner, c.repo, prNumber)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to get PR %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
// Reuse the existing logic to build the base PR object
|
||||
pr, err := c.buildPRWithCommits(ctx, ghPR)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to build PR details for %d: %w", prNumber, err)
|
||||
}
|
||||
|
||||
details := &PRDetails{
|
||||
PR: pr,
|
||||
State: getString(ghPR.State),
|
||||
Mergeable: ghPR.Mergeable != nil && *ghPR.Mergeable,
|
||||
}
|
||||
|
||||
return details, nil
|
||||
}
|
||||
|
||||
// buildPRWithCommits fetches commits and constructs a PR object from a GitHub API response
|
||||
func (c *Client) buildPRWithCommits(ctx context.Context, ghPR *github.PullRequest) (*PR, error) {
|
||||
commits, _, err := c.client.PullRequests.ListCommits(ctx, c.owner, c.repo, *ghPR.Number, nil)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to fetch commits for PR %d: %w", *ghPR.Number, err)
|
||||
}
|
||||
|
||||
return c.convertGitHubPR(ghPR, commits), nil
|
||||
}
|
||||
|
||||
// convertGitHubPR transforms GitHub API data into our internal PR struct (pure function)
|
||||
func (c *Client) convertGitHubPR(ghPR *github.PullRequest, commits []*github.RepositoryCommit) *PR {
|
||||
|
||||
result := &PR{
|
||||
Number: prNumber,
|
||||
Title: getString(pr.Title),
|
||||
Body: getString(pr.Body),
|
||||
URL: getString(pr.HTMLURL),
|
||||
Number: *ghPR.Number,
|
||||
Title: getString(ghPR.Title),
|
||||
Body: getString(ghPR.Body),
|
||||
URL: getString(ghPR.HTMLURL),
|
||||
Commits: make([]PRCommit, 0, len(commits)),
|
||||
}
|
||||
|
||||
if pr.MergedAt != nil {
|
||||
result.MergedAt = pr.MergedAt.Time
|
||||
if ghPR.MergedAt != nil {
|
||||
result.MergedAt = ghPR.MergedAt.Time
|
||||
}
|
||||
|
||||
if pr.User != nil {
|
||||
result.Author = getString(pr.User.Login)
|
||||
result.AuthorURL = getString(pr.User.HTMLURL)
|
||||
userType := getString(pr.User.Type) // GitHub API returns "User", "Organization", or "Bot"
|
||||
if ghPR.User != nil {
|
||||
result.Author = getString(ghPR.User.Login)
|
||||
result.AuthorURL = getString(ghPR.User.HTMLURL)
|
||||
userType := getString(ghPR.User.Type)
|
||||
|
||||
// Convert GitHub API type to lowercase
|
||||
switch userType {
|
||||
case "User":
|
||||
result.AuthorType = "user"
|
||||
@@ -137,12 +191,12 @@ func (c *Client) fetchSinglePR(ctx context.Context, prNumber int) (*PR, error) {
|
||||
case "Bot":
|
||||
result.AuthorType = "bot"
|
||||
default:
|
||||
result.AuthorType = "user" // Default fallback
|
||||
result.AuthorType = "user"
|
||||
}
|
||||
}
|
||||
|
||||
if pr.MergeCommitSHA != nil {
|
||||
result.MergeCommit = *pr.MergeCommitSHA
|
||||
if ghPR.MergeCommitSHA != nil {
|
||||
result.MergeCommit = *ghPR.MergeCommitSHA
|
||||
}
|
||||
|
||||
for _, commit := range commits {
|
||||
@@ -153,12 +207,34 @@ func (c *Client) fetchSinglePR(ctx context.Context, prNumber int) (*PR, error) {
|
||||
}
|
||||
if commit.Commit.Author != nil {
|
||||
prCommit.Author = getString(commit.Commit.Author.Name)
|
||||
prCommit.Email = getString(commit.Commit.Author.Email) // Extract author email from GitHub API response
|
||||
// Capture actual commit timestamp from GitHub API
|
||||
if commit.Commit.Author.Date != nil {
|
||||
prCommit.Date = commit.Commit.Author.Date.Time
|
||||
}
|
||||
}
|
||||
// Capture parent commit SHAs for merge detection
|
||||
if commit.Parents != nil {
|
||||
for _, parent := range commit.Parents {
|
||||
if parent.SHA != nil {
|
||||
prCommit.Parents = append(prCommit.Parents, *parent.SHA)
|
||||
}
|
||||
}
|
||||
}
|
||||
result.Commits = append(result.Commits, prCommit)
|
||||
}
|
||||
}
|
||||
|
||||
return result, nil
|
||||
return result
|
||||
}
|
||||
|
||||
func (c *Client) fetchSinglePR(ctx context.Context, prNumber int) (*PR, error) {
|
||||
ghPR, _, err := c.client.PullRequests.Get(ctx, c.owner, c.repo, prNumber)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return c.buildPRWithCommits(ctx, ghPR)
|
||||
}
|
||||
|
||||
func getString(s *string) string {
|
||||
@@ -332,6 +408,7 @@ func (c *Client) FetchAllMergedPRsGraphQL(since time.Time) ([]*PR, error) {
|
||||
SHA: commitNode.Commit.OID,
|
||||
Message: strings.TrimSpace(commitNode.Commit.Message),
|
||||
Author: commitNode.Commit.Author.Name,
|
||||
Date: commitNode.Commit.AuthoredDate, // Use actual commit timestamp
|
||||
}
|
||||
pr.Commits = append(pr.Commits, commit)
|
||||
}
|
||||
|
||||
59
cmd/generate_changelog/internal/github/email_test.go
Normal file
59
cmd/generate_changelog/internal/github/email_test.go
Normal file
@@ -0,0 +1,59 @@
|
||||
package github
|
||||
|
||||
import (
|
||||
"testing"
|
||||
"time"
|
||||
)
|
||||
|
||||
func TestPRCommitEmailHandling(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
commit PRCommit
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Valid email field",
|
||||
commit: PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Fix bug in authentication",
|
||||
Author: "John Doe",
|
||||
Email: "john.doe@example.com",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456"},
|
||||
},
|
||||
expected: "john.doe@example.com",
|
||||
},
|
||||
{
|
||||
name: "Empty email field",
|
||||
commit: PRCommit{
|
||||
SHA: "abc123",
|
||||
Message: "Fix bug in authentication",
|
||||
Author: "John Doe",
|
||||
Email: "",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"def456"},
|
||||
},
|
||||
expected: "",
|
||||
},
|
||||
{
|
||||
name: "Email field with proper initialization",
|
||||
commit: PRCommit{
|
||||
SHA: "def789",
|
||||
Message: "Add new feature",
|
||||
Author: "Jane Smith",
|
||||
Email: "jane.smith@company.org",
|
||||
Date: time.Now(),
|
||||
Parents: []string{"ghi012"},
|
||||
},
|
||||
expected: "jane.smith@company.org",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
if tt.commit.Email != tt.expected {
|
||||
t.Errorf("Expected email %q, got %q", tt.expected, tt.commit.Email)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -15,10 +15,20 @@ type PR struct {
|
||||
MergeCommit string
|
||||
}
|
||||
|
||||
// PRDetails encapsulates all relevant information about a Pull Request.
|
||||
type PRDetails struct {
|
||||
*PR
|
||||
State string
|
||||
Mergeable bool
|
||||
}
|
||||
|
||||
type PRCommit struct {
|
||||
SHA string
|
||||
Message string
|
||||
Author string
|
||||
Email string // Author email from GitHub API, empty if not public
|
||||
Date time.Time // Timestamp field
|
||||
Parents []string // Parent commits (for merge detection)
|
||||
}
|
||||
|
||||
// GraphQL query structures for hasura client
|
||||
@@ -43,9 +53,10 @@ type PullRequestsQuery struct {
|
||||
Commits struct {
|
||||
Nodes []struct {
|
||||
Commit struct {
|
||||
OID string `graphql:"oid"`
|
||||
Message string
|
||||
Author struct {
|
||||
OID string `graphql:"oid"`
|
||||
Message string
|
||||
AuthoredDate time.Time `graphql:"authoredDate"`
|
||||
Author struct {
|
||||
Name string
|
||||
}
|
||||
}
|
||||
|
||||
81
cmd/generate_changelog/internal/release.go
Normal file
81
cmd/generate_changelog/internal/release.go
Normal file
@@ -0,0 +1,81 @@
|
||||
package internal
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/cache"
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"
|
||||
"github.com/google/go-github/v66/github"
|
||||
"golang.org/x/oauth2"
|
||||
)
|
||||
|
||||
type ReleaseManager struct {
|
||||
cache *cache.Cache
|
||||
githubToken string
|
||||
owner string
|
||||
repo string
|
||||
}
|
||||
|
||||
func NewReleaseManager(cfg *config.Config) (*ReleaseManager, error) {
|
||||
cache, err := cache.New(cfg.CacheFile)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("failed to create cache: %w", err)
|
||||
}
|
||||
|
||||
return &ReleaseManager{
|
||||
cache: cache,
|
||||
githubToken: cfg.GitHubToken,
|
||||
owner: "danielmiessler",
|
||||
repo: "fabric",
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (rm *ReleaseManager) Close() error {
|
||||
return rm.cache.Close()
|
||||
}
|
||||
|
||||
func (rm *ReleaseManager) UpdateReleaseDescription(version string) error {
|
||||
versions, err := rm.cache.GetVersions()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get versions from cache: %w", err)
|
||||
}
|
||||
|
||||
versionData, exists := versions[version]
|
||||
if !exists {
|
||||
return fmt.Errorf("version %s not found in versions table", version)
|
||||
}
|
||||
|
||||
if versionData.AISummary == "" {
|
||||
return fmt.Errorf("ai_summary is empty for version %s", version)
|
||||
}
|
||||
|
||||
releaseBody := fmt.Sprintf("## Changes\n\n%s", versionData.AISummary)
|
||||
|
||||
ctx := context.Background()
|
||||
var client *github.Client
|
||||
|
||||
if rm.githubToken != "" {
|
||||
ts := oauth2.StaticTokenSource(
|
||||
&oauth2.Token{AccessToken: rm.githubToken},
|
||||
)
|
||||
tc := oauth2.NewClient(ctx, ts)
|
||||
client = github.NewClient(tc)
|
||||
} else {
|
||||
client = github.NewClient(nil)
|
||||
}
|
||||
|
||||
release, _, err := client.Repositories.GetReleaseByTag(ctx, rm.owner, rm.repo, version)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to get release for version %s: %w", version, err)
|
||||
}
|
||||
|
||||
release.Body = &releaseBody
|
||||
_, _, err = client.Repositories.EditRelease(ctx, rm.owner, rm.repo, *release.ID, release)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update release description for version %s: %w", version, err)
|
||||
}
|
||||
|
||||
fmt.Printf("Successfully updated release description for %s\n", version)
|
||||
return nil
|
||||
}
|
||||
@@ -5,8 +5,10 @@ import (
|
||||
"os"
|
||||
"path/filepath"
|
||||
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal"
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/changelog"
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"
|
||||
"github.com/danielmiessler/fabric/cmd/generate_changelog/util"
|
||||
"github.com/joho/godotenv"
|
||||
"github.com/spf13/cobra"
|
||||
)
|
||||
@@ -21,7 +23,8 @@ var rootCmd = &cobra.Command{
|
||||
Long: `A high-performance changelog generator that walks git history,
|
||||
collects version information and pull requests, and generates a
|
||||
comprehensive changelog in markdown format.`,
|
||||
RunE: run,
|
||||
RunE: run,
|
||||
SilenceUsage: true, // Don't show usage on runtime errors, only on flag errors
|
||||
}
|
||||
|
||||
func init() {
|
||||
@@ -36,18 +39,51 @@ func init() {
|
||||
rootCmd.Flags().StringVar(&cfg.GitHubToken, "token", "", "GitHub API token (or set GITHUB_TOKEN env var)")
|
||||
rootCmd.Flags().BoolVar(&cfg.ForcePRSync, "force-pr-sync", false, "Force a full PR sync from GitHub (ignores cache age)")
|
||||
rootCmd.Flags().BoolVar(&cfg.EnableAISummary, "ai-summarize", false, "Generate AI-enhanced summaries using Fabric")
|
||||
rootCmd.Flags().IntVar(&cfg.IncomingPR, "incoming-pr", 0, "Pre-process PR for changelog (provide PR number)")
|
||||
rootCmd.Flags().StringVar(&cfg.ProcessPRsVersion, "process-prs", "", "Process all incoming PR files for release (provide version like v1.4.262)")
|
||||
rootCmd.Flags().StringVar(&cfg.IncomingDir, "incoming-dir", "./cmd/generate_changelog/incoming", "Directory for incoming PR files")
|
||||
rootCmd.Flags().BoolVar(&cfg.Push, "push", false, "Enable automatic git push after creating an incoming entry")
|
||||
rootCmd.Flags().BoolVar(&cfg.SyncDB, "sync-db", false, "Synchronize and validate database integrity with git history and GitHub PRs")
|
||||
rootCmd.Flags().StringVar(&cfg.Release, "release", "", "Update GitHub release description with AI summary for version (e.g., v1.2.3)")
|
||||
}
|
||||
|
||||
func run(cmd *cobra.Command, args []string) error {
|
||||
if cfg.GitHubToken == "" {
|
||||
cfg.GitHubToken = os.Getenv("GITHUB_TOKEN")
|
||||
if cfg.IncomingPR > 0 && cfg.ProcessPRsVersion != "" {
|
||||
return fmt.Errorf("--incoming-pr and --process-prs are mutually exclusive flags")
|
||||
}
|
||||
|
||||
if cfg.Release != "" && (cfg.IncomingPR > 0 || cfg.ProcessPRsVersion != "" || cfg.SyncDB) {
|
||||
return fmt.Errorf("--release cannot be used with other processing flags")
|
||||
}
|
||||
|
||||
cfg.GitHubToken = util.GetTokenFromEnv(cfg.GitHubToken)
|
||||
|
||||
generator, err := changelog.New(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create changelog generator: %w", err)
|
||||
}
|
||||
|
||||
if cfg.IncomingPR > 0 {
|
||||
return generator.ProcessIncomingPR(cfg.IncomingPR)
|
||||
}
|
||||
|
||||
if cfg.ProcessPRsVersion != "" {
|
||||
return generator.CreateNewChangelogEntry(cfg.ProcessPRsVersion)
|
||||
}
|
||||
|
||||
if cfg.SyncDB {
|
||||
return generator.SyncDatabase()
|
||||
}
|
||||
|
||||
if cfg.Release != "" {
|
||||
releaseManager, err := internal.NewReleaseManager(cfg)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to create release manager: %w", err)
|
||||
}
|
||||
defer releaseManager.Close()
|
||||
return releaseManager.UpdateReleaseDescription(cfg.Release)
|
||||
}
|
||||
|
||||
output, err := generator.Generate()
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to generate changelog: %w", err)
|
||||
@@ -77,8 +113,5 @@ func main() {
|
||||
}
|
||||
}
|
||||
|
||||
if err := rootCmd.Execute(); err != nil {
|
||||
fmt.Fprintf(os.Stderr, "Error: %v\n", err)
|
||||
os.Exit(1)
|
||||
}
|
||||
rootCmd.Execute()
|
||||
}
|
||||
|
||||
31
cmd/generate_changelog/util/token.go
Normal file
31
cmd/generate_changelog/util/token.go
Normal file
@@ -0,0 +1,31 @@
|
||||
package util
|
||||
|
||||
import (
|
||||
"os"
|
||||
)
|
||||
|
||||
// GetTokenFromEnv returns a GitHub token based on the following precedence order:
|
||||
// 1. If tokenValue is non-empty, it is returned.
|
||||
// 2. Otherwise, if the GITHUB_TOKEN environment variable is set, its value is returned.
|
||||
// 3. Otherwise, if the GH_TOKEN environment variable is set, its value is returned.
|
||||
// 4. If none of the above are set, an empty string is returned.
|
||||
//
|
||||
// Example:
|
||||
//
|
||||
// os.Setenv("GITHUB_TOKEN", "abc")
|
||||
// os.Setenv("GH_TOKEN", "def")
|
||||
// GetTokenFromEnv("xyz") // returns "xyz"
|
||||
// GetTokenFromEnv("") // returns "abc"
|
||||
// os.Unsetenv("GITHUB_TOKEN")
|
||||
// GetTokenFromEnv("") // returns "def"
|
||||
// os.Unsetenv("GH_TOKEN")
|
||||
// GetTokenFromEnv("") // returns ""
|
||||
func GetTokenFromEnv(tokenValue string) string {
|
||||
if tokenValue == "" {
|
||||
tokenValue = os.Getenv("GITHUB_TOKEN")
|
||||
if tokenValue == "" {
|
||||
tokenValue = os.Getenv("GH_TOKEN")
|
||||
}
|
||||
}
|
||||
return tokenValue
|
||||
}
|
||||
@@ -1,47 +1,62 @@
|
||||
#compdef fabric
|
||||
#compdef fabric fabric-ai
|
||||
|
||||
# Zsh completion for fabric CLI
|
||||
# Place this file in a directory in your $fpath (e.g. /usr/local/share/zsh/site-functions)
|
||||
|
||||
_fabric_patterns() {
|
||||
local -a patterns
|
||||
patterns=(${(f)"$(fabric --listpatterns --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
patterns=(${(f)"$($cmd --listpatterns --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Patterns:" ${patterns}
|
||||
}
|
||||
|
||||
_fabric_models() {
|
||||
local -a models
|
||||
models=(${(f)"$(fabric --listmodels --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
models=(${(f)"$($cmd --listmodels --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Models:" ${models}
|
||||
}
|
||||
|
||||
_fabric_vendors() {
|
||||
local -a vendors
|
||||
local cmd=${words[1]}
|
||||
vendors=(${(f)"$($cmd --listvendors --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Vendors:" ${vendors}
|
||||
}
|
||||
|
||||
_fabric_contexts() {
|
||||
local -a contexts
|
||||
contexts=(${(f)"$(fabric --listcontexts --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
contexts=(${(f)"$($cmd --listcontexts --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Contexts:" ${contexts}
|
||||
}
|
||||
|
||||
_fabric_sessions() {
|
||||
local -a sessions
|
||||
sessions=(${(f)"$(fabric --listsessions --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
sessions=(${(f)"$($cmd --listsessions --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Sessions:" ${sessions}
|
||||
}
|
||||
|
||||
_fabric_strategies() {
|
||||
local -a strategies
|
||||
strategies=(${(f)"$(fabric --liststrategies --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
strategies=(${(f)"$($cmd --liststrategies --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Strategies:" ${strategies}
|
||||
}
|
||||
|
||||
_fabric_extensions() {
|
||||
local -a extensions
|
||||
extensions=(${(f)"$(fabric --listextensions --shell-complete-list 2>/dev/null)"})
|
||||
local cmd=${words[1]}
|
||||
extensions=(${(f)"$($cmd --listextensions --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Extensions:" ${extensions}
|
||||
'(-L --listmodels)'{-L,--listmodels}'[List all available models]:list models:_fabric_models' \
|
||||
'(-x --listcontexts)'{-x,--listcontexts}'[List all contexts]:list contexts:_fabric_contexts' \
|
||||
'(-X --listsessions)'{-X,--listsessions}'[List all sessions]:list sessions:_fabric_sessions' \
|
||||
'(--listextensions)--listextensions[List all registered extensions]' \
|
||||
'(--liststrategies)--liststrategies[List all strategies]:list strategies:_fabric_strategies' \
|
||||
'(--listvendors)--listvendors[List all vendors]' \
|
||||
vendors=(${(f)"$(fabric --listvendors 2>/dev/null)"})
|
||||
compadd -X "Vendors:" ${vendors}
|
||||
}
|
||||
|
||||
_fabric_gemini_voices() {
|
||||
local -a voices
|
||||
local cmd=${words[1]}
|
||||
voices=(${(f)"$($cmd --list-gemini-voices --shell-complete-list 2>/dev/null)"})
|
||||
compadd -X "Gemini TTS Voices:" ${voices}
|
||||
}
|
||||
|
||||
_fabric() {
@@ -68,6 +83,7 @@ _fabric() {
'(-U --updatepatterns)'{-U,--updatepatterns}'[Update patterns]' \
'(-c --copy)'{-c,--copy}'[Copy to clipboard]' \
'(-m --model)'{-m,--model}'[Choose model]:model:_fabric_models' \
'(-V --vendor)'{-V,--vendor}'[Specify vendor for chosen model (e.g., -V "LM Studio" -m openai/gpt-oss-20b)]:vendor:_fabric_vendors' \
'(--modelContextLength)--modelContextLength[Model context length (only affects ollama)]:length:' \
'(-o --output)'{-o,--output}'[Output to file]:file:_files' \
'(--output-session)--output-session[Output the entire session to the output file]' \
@@ -79,10 +95,12 @@ _fabric() {
'(--transcript-with-timestamps)--transcript-with-timestamps[Grab transcript from YouTube video with timestamps]' \
'(--comments)--comments[Grab comments from YouTube video and send to chat]' \
'(--metadata)--metadata[Output video metadata]' \
'(--yt-dlp-args)--yt-dlp-args[Additional arguments to pass to yt-dlp]:yt-dlp args:' \
'(-g --language)'{-g,--language}'[Specify the Language Code for the chat, e.g. -g=en -g=zh]:language:' \
'(-u --scrape_url)'{-u,--scrape_url}'[Scrape website URL to markdown using Jina AI]:url:' \
'(-q --scrape_question)'{-q,--scrape_question}'[Search question using Jina AI]:question:' \
'(-e --seed)'{-e,--seed}'[Seed to be used for LMM generation]:seed:' \
'(--thinking)--thinking[Set reasoning/thinking level]:level:(off low medium high)' \
'(-w --wipecontext)'{-w,--wipecontext}'[Wipe context]:context:_fabric_contexts' \
'(-W --wipesession)'{-W,--wipesession}'[Wipe session]:session:_fabric_sessions' \
'(--printcontext)--printcontext[Print context]:context:_fabric_contexts' \
@@ -96,7 +114,7 @@ _fabric() {
'(--api-key)--api-key[API key used to secure server routes]:api-key:' \
'(--config)--config[Path to YAML config file]:config file:_files -g "*.yaml *.yml"' \
'(--version)--version[Print current version]' \
'(--search)--search[Enable web search tool for supported models (Anthropic, OpenAI)]' \
'(--search)--search[Enable web search tool for supported models (Anthropic, OpenAI, Gemini)]' \
'(--search-location)--search-location[Set location for web search results]:location:' \
'(--image-file)--image-file[Save generated image to specified file path]:image file:_files -g "*.png *.webp *.jpeg *.jpg"' \
'(--image-size)--image-size[Image dimensions]:size:(1024x1024 1536x1024 1024x1536 auto)' \
@@ -109,14 +127,17 @@ _fabric() {
'(--strategy)--strategy[Choose a strategy from the available strategies]:strategy:_fabric_strategies' \
'(--liststrategies)--liststrategies[List all strategies]' \
'(--listvendors)--listvendors[List all vendors]' \
'(--voice)--voice[TTS voice name for supported models]:voice:_fabric_gemini_voices' \
'(--list-gemini-voices)--list-gemini-voices[List all available Gemini TTS voices]' \
'(--shell-complete-list)--shell-complete-list[Output raw list without headers/formatting (for shell completion)]' \
'(--suppress-think)--suppress-think[Suppress text enclosed in thinking tags]' \
'(--think-start-tag)--think-start-tag[Start tag for thinking sections (default: <think>)]:start tag:' \
'(--think-end-tag)--think-end-tag[End tag for thinking sections (default: </think>)]:end tag:' \
'(--disable-responses-api)--disable-responses-api[Disable OpenAI Responses API (default: false)]' \
'(--notification)--notification[Send desktop notification when command completes]' \
'(--notification-command)--notification-command[Custom command to run for notifications]:notification command:' \
'(-h --help)'{-h,--help}'[Show this help message]' \
'*:arguments:'
}

_fabric "$@"
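
# Manual install sketch (paths are examples; any directory already in $fpath works):
#   cp _fabric /usr/local/share/zsh/site-functions/_fabric
#   autoload -U compinit && compinit
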
@@ -13,11 +13,11 @@ _fabric() {
_get_comp_words_by_ref -n : cur prev words cword

# Define all possible options/flags
local opts="--pattern -p --variable -v --context -C --session --attachment -a --setup -S --temperature -t --topp -T --stream -s --presencepenalty -P --raw -r --frequencypenalty -F --listpatterns -l --listmodels -L --listcontexts -x --listsessions -X --updatepatterns -U --copy -c --model -m --modelContextLength --output -o --output-session --latest -n --changeDefaultModel -d --youtube -y --playlist --transcript --transcript-with-timestamps --comments --metadata --language -g --scrape_url -u --scrape_question -q --seed -e --wipecontext -w --wipesession -W --printcontext --printsession --readability --input-has-vars --dry-run --serve --serveOllama --address --api-key --config --search --search-location --image-file --image-size --image-quality --image-compression --image-background --suppress-think --think-start-tag --think-end-tag --disable-responses-api --version --listextensions --addextension --rmextension --strategy --liststrategies --listvendors --shell-complete-list --help -h"
local opts="--pattern -p --variable -v --context -C --session --attachment -a --setup -S --temperature -t --topp -T --stream -s --presencepenalty -P --raw -r --frequencypenalty -F --listpatterns -l --listmodels -L --listcontexts -x --listsessions -X --updatepatterns -U --copy -c --model -m --vendor -V --modelContextLength --output -o --output-session --latest -n --changeDefaultModel -d --youtube -y --playlist --transcript --transcript-with-timestamps --comments --metadata --yt-dlp-args --language -g --scrape_url -u --scrape_question -q --seed -e --thinking --wipecontext -w --wipesession -W --printcontext --printsession --readability --input-has-vars --dry-run --serve --serveOllama --address --api-key --config --search --search-location --image-file --image-size --image-quality --image-compression --image-background --suppress-think --think-start-tag --think-end-tag --disable-responses-api --voice --list-gemini-voices --notification --notification-command --version --listextensions --addextension --rmextension --strategy --liststrategies --listvendors --shell-complete-list --help -h"

# Helper function for dynamic completions
_fabric_get_list() {
fabric "$1" --shell-complete-list 2>/dev/null
"${COMP_WORDS[0]}" "$1" --shell-complete-list 2>/dev/null
}

# Handle completions based on the previous word
@@ -38,6 +38,10 @@ _fabric() {
COMPREPLY=($(compgen -W "$(_fabric_get_list --listmodels)" -- "${cur}"))
return 0
;;
-V | --vendor)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listvendors)" -- "${cur}"))
return 0
;;
-w | --wipecontext)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listcontexts)" -- "${cur}"))
return 0
@@ -54,6 +58,10 @@ _fabric() {
COMPREPLY=($(compgen -W "$(_fabric_get_list --listsessions)" -- "${cur}"))
return 0
;;
--thinking)
COMPREPLY=($(compgen -W "off low medium high" -- "${cur}"))
return 0
;;
--rmextension)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listextensions)" -- "${cur}"))
return 0
@@ -62,6 +70,10 @@ _fabric() {
COMPREPLY=($(compgen -W "$(_fabric_get_list --liststrategies)" -- "${cur}"))
return 0
;;
--voice)
COMPREPLY=($(compgen -W "$(_fabric_get_list --list-gemini-voices)" -- "${cur}"))
return 0
;;
# Options requiring file/directory paths
-a | --attachment | -o | --output | --config | --addextension | --image-file)
_filedir
@@ -81,7 +93,7 @@ _fabric() {
return 0
;;
# Options requiring simple arguments (no specific completion logic here)
-v | --variable | -t | --temperature | -T | --topp | -P | --presencepenalty | -F | --frequencypenalty | --modelContextLength | -n | --latest | -y | --youtube | -g | --language | -u | --scrape_url | -q | --scrape_question | -e | --seed | --address | --api-key | --search-location | --image-compression | --think-start-tag | --think-end-tag)
-v | --variable | -t | --temperature | -T | --topp | -P | --presencepenalty | -F | --frequencypenalty | --modelContextLength | -n | --latest | -y | --youtube | --yt-dlp-args | -g | --language | -u | --scrape_url | -q | --scrape_question | -e | --seed | --address | --api-key | --search-location | --image-compression | --think-start-tag | --think-end-tag | --notification-command)
# No specific completion suggestions, user types the value
return 0
;;
@@ -100,4 +112,4 @@ _fabric() {

}

complete -F _fabric fabric
complete -F _fabric fabric fabric-ai
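
# Quick test sketch (assumes the file is saved as fabric.bash in the current directory):
#   source ./fabric.bash
#   fabric --<TAB>      # should now offer the flags listed in $opts
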
@@ -8,98 +8,127 @@

# Helper functions for dynamic completions
function __fabric_get_patterns
fabric --listpatterns --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --listpatterns --shell-complete-list 2>/dev/null
end

function __fabric_get_models
fabric --listmodels --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --listmodels --shell-complete-list 2>/dev/null
end

function __fabric_get_vendors
set cmd (commandline -opc)[1]
$cmd --listvendors --shell-complete-list 2>/dev/null
end

function __fabric_get_contexts
fabric --listcontexts --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --listcontexts --shell-complete-list 2>/dev/null
end

function __fabric_get_sessions
fabric --listsessions --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --listsessions --shell-complete-list 2>/dev/null
end

function __fabric_get_strategies
fabric --liststrategies --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --liststrategies --shell-complete-list 2>/dev/null
end

function __fabric_get_extensions
fabric --listextensions --shell-complete-list 2>/dev/null
set cmd (commandline -opc)[1]
$cmd --listextensions --shell-complete-list 2>/dev/null
end

function __fabric_get_gemini_voices
set cmd (commandline -opc)[1]
$cmd --list-gemini-voices --shell-complete-list 2>/dev/null
end

# Main completion function
complete -c fabric -f
function __fabric_register_completions
set cmd $argv[1]
complete -c $cmd -f

# Flag completions with arguments
complete -c fabric -s p -l pattern -d "Choose a pattern from the available patterns" -a "(__fabric_get_patterns)"
complete -c fabric -s v -l variable -d "Values for pattern variables, e.g. -v=#role:expert -v=#points:30"
complete -c fabric -s C -l context -d "Choose a context from the available contexts" -a "(__fabric_get_contexts)"
complete -c fabric -l session -d "Choose a session from the available sessions" -a "(__fabric_get_sessions)"
complete -c fabric -s a -l attachment -d "Attachment path or URL (e.g. for OpenAI image recognition messages)" -r
complete -c fabric -s t -l temperature -d "Set temperature (default: 0.7)"
complete -c fabric -s T -l topp -d "Set top P (default: 0.9)"
complete -c fabric -s P -l presencepenalty -d "Set presence penalty (default: 0.0)"
complete -c fabric -s F -l frequencypenalty -d "Set frequency penalty (default: 0.0)"
complete -c fabric -s m -l model -d "Choose model" -a "(__fabric_get_models)"
complete -c fabric -l modelContextLength -d "Model context length (only affects ollama)"
complete -c fabric -s o -l output -d "Output to file" -r
complete -c fabric -s n -l latest -d "Number of latest patterns to list (default: 0)"
complete -c fabric -s y -l youtube -d "YouTube video or play list URL to grab transcript, comments from it"
complete -c fabric -s g -l language -d "Specify the Language Code for the chat, e.g. -g=en -g=zh"
complete -c fabric -s u -l scrape_url -d "Scrape website URL to markdown using Jina AI"
complete -c fabric -s q -l scrape_question -d "Search question using Jina AI"
complete -c fabric -s e -l seed -d "Seed to be used for LMM generation"
complete -c fabric -s w -l wipecontext -d "Wipe context" -a "(__fabric_get_contexts)"
complete -c fabric -s W -l wipesession -d "Wipe session" -a "(__fabric_get_sessions)"
complete -c fabric -l printcontext -d "Print context" -a "(__fabric_get_contexts)"
complete -c fabric -l printsession -d "Print session" -a "(__fabric_get_sessions)"
complete -c fabric -l address -d "The address to bind the REST API (default: :8080)"
complete -c fabric -l api-key -d "API key used to secure server routes"
complete -c fabric -l config -d "Path to YAML config file" -r -a "*.yaml *.yml"
complete -c fabric -l search-location -d "Set location for web search results (e.g., 'America/Los_Angeles')"
complete -c fabric -l image-file -d "Save generated image to specified file path (e.g., 'output.png')" -r -a "*.png *.webp *.jpeg *.jpg"
complete -c fabric -l image-size -d "Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)" -a "1024x1024 1536x1024 1024x1536 auto"
complete -c fabric -l image-quality -d "Image quality: low, medium, high, auto (default: auto)" -a "low medium high auto"
complete -c fabric -l image-compression -d "Compression level 0-100 for JPEG/WebP formats (default: not set)" -r
complete -c fabric -l image-background -d "Background type: opaque, transparent (default: opaque, only for PNG/WebP)" -a "opaque transparent"
complete -c fabric -l addextension -d "Register a new extension from config file path" -r -a "*.yaml *.yml"
complete -c fabric -l rmextension -d "Remove a registered extension by name" -a "(__fabric_get_extensions)"
complete -c fabric -l strategy -d "Choose a strategy from the available strategies" -a "(__fabric_get_strategies)"
complete -c fabric -l think-start-tag -d "Start tag for thinking sections (default: <think>)"
complete -c fabric -l think-end-tag -d "End tag for thinking sections (default: </think>)"
# Flag completions with arguments
complete -c $cmd -s p -l pattern -d "Choose a pattern from the available patterns" -a "(__fabric_get_patterns)"
complete -c $cmd -s v -l variable -d "Values for pattern variables, e.g. -v=#role:expert -v=#points:30"
complete -c $cmd -s C -l context -d "Choose a context from the available contexts" -a "(__fabric_get_contexts)"
complete -c $cmd -l session -d "Choose a session from the available sessions" -a "(__fabric_get_sessions)"
complete -c $cmd -s a -l attachment -d "Attachment path or URL (e.g. for OpenAI image recognition messages)" -r
complete -c $cmd -s t -l temperature -d "Set temperature (default: 0.7)"
complete -c $cmd -s T -l topp -d "Set top P (default: 0.9)"
complete -c $cmd -s P -l presencepenalty -d "Set presence penalty (default: 0.0)"
complete -c $cmd -s F -l frequencypenalty -d "Set frequency penalty (default: 0.0)"
complete -c $cmd -s m -l model -d "Choose model" -a "(__fabric_get_models)"
complete -c $cmd -s V -l vendor -d "Specify vendor for chosen model (e.g., -V \"LM Studio\" -m openai/gpt-oss-20b)" -a "(__fabric_get_vendors)"
complete -c $cmd -l modelContextLength -d "Model context length (only affects ollama)"
complete -c $cmd -s o -l output -d "Output to file" -r
complete -c $cmd -s n -l latest -d "Number of latest patterns to list (default: 0)"
complete -c $cmd -s y -l youtube -d "YouTube video or play list URL to grab transcript, comments from it"
complete -c $cmd -s g -l language -d "Specify the Language Code for the chat, e.g. -g=en -g=zh"
complete -c $cmd -s u -l scrape_url -d "Scrape website URL to markdown using Jina AI"
complete -c $cmd -s q -l scrape_question -d "Search question using Jina AI"
complete -c $cmd -s e -l seed -d "Seed to be used for LMM generation"
complete -c $cmd -l thinking -d "Set reasoning/thinking level" -a "off low medium high"
complete -c $cmd -s w -l wipecontext -d "Wipe context" -a "(__fabric_get_contexts)"
complete -c $cmd -s W -l wipesession -d "Wipe session" -a "(__fabric_get_sessions)"
complete -c $cmd -l printcontext -d "Print context" -a "(__fabric_get_contexts)"
complete -c $cmd -l printsession -d "Print session" -a "(__fabric_get_sessions)"
complete -c $cmd -l address -d "The address to bind the REST API (default: :8080)"
complete -c $cmd -l api-key -d "API key used to secure server routes"
complete -c $cmd -l config -d "Path to YAML config file" -r -a "*.yaml *.yml"
complete -c $cmd -l search-location -d "Set location for web search results (e.g., 'America/Los_Angeles')"
complete -c $cmd -l image-file -d "Save generated image to specified file path (e.g., 'output.png')" -r -a "*.png *.webp *.jpeg *.jpg"
complete -c $cmd -l image-size -d "Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)" -a "1024x1024 1536x1024 1024x1536 auto"
complete -c $cmd -l image-quality -d "Image quality: low, medium, high, auto (default: auto)" -a "low medium high auto"
complete -c $cmd -l image-compression -d "Compression level 0-100 for JPEG/WebP formats (default: not set)" -r
complete -c $cmd -l image-background -d "Background type: opaque, transparent (default: opaque, only for PNG/WebP)" -a "opaque transparent"
complete -c $cmd -l addextension -d "Register a new extension from config file path" -r -a "*.yaml *.yml"
complete -c $cmd -l rmextension -d "Remove a registered extension by name" -a "(__fabric_get_extensions)"
complete -c $cmd -l strategy -d "Choose a strategy from the available strategies" -a "(__fabric_get_strategies)"
complete -c $cmd -l think-start-tag -d "Start tag for thinking sections (default: <think>)"
complete -c $cmd -l think-end-tag -d "End tag for thinking sections (default: </think>)"
complete -c $cmd -l voice -d "TTS voice name for supported models (e.g., Kore, Charon, Puck)" -a "(__fabric_get_gemini_voices)"
complete -c $cmd -l notification-command -d "Custom command to run for notifications (overrides built-in notifications)"

# Boolean flags (no arguments)
complete -c fabric -s S -l setup -d "Run setup for all reconfigurable parts of fabric"
complete -c fabric -s s -l stream -d "Stream"
complete -c fabric -s r -l raw -d "Use the defaults of the model without sending chat options"
complete -c fabric -s l -l listpatterns -d "List all patterns"
complete -c fabric -s L -l listmodels -d "List all available models"
complete -c fabric -s x -l listcontexts -d "List all contexts"
complete -c fabric -s X -l listsessions -d "List all sessions"
complete -c fabric -s U -l updatepatterns -d "Update patterns"
complete -c fabric -s c -l copy -d "Copy to clipboard"
complete -c fabric -l output-session -d "Output the entire session to the output file"
complete -c fabric -s d -l changeDefaultModel -d "Change default model"
complete -c fabric -l playlist -d "Prefer playlist over video if both ids are present in the URL"
complete -c fabric -l transcript -d "Grab transcript from YouTube video and send to chat"
complete -c fabric -l transcript-with-timestamps -d "Grab transcript from YouTube video with timestamps"
complete -c fabric -l comments -d "Grab comments from YouTube video and send to chat"
complete -c fabric -l metadata -d "Output video metadata"
complete -c fabric -l readability -d "Convert HTML input into a clean, readable view"
complete -c fabric -l input-has-vars -d "Apply variables to user input"
complete -c fabric -l dry-run -d "Show what would be sent to the model without actually sending it"
complete -c fabric -l search -d "Enable web search tool for supported models (Anthropic, OpenAI)"
complete -c fabric -l serve -d "Serve the Fabric Rest API"
complete -c fabric -l serveOllama -d "Serve the Fabric Rest API with ollama endpoints"
complete -c fabric -l version -d "Print current version"
complete -c fabric -l listextensions -d "List all registered extensions"
complete -c fabric -l liststrategies -d "List all strategies"
complete -c fabric -l listvendors -d "List all vendors"
complete -c fabric -l shell-complete-list -d "Output raw list without headers/formatting (for shell completion)"
complete -c fabric -l suppress-think -d "Suppress text enclosed in thinking tags"
complete -c fabric -l disable-responses-api -d "Disable OpenAI Responses API (default: false)"
complete -c fabric -s h -l help -d "Show this help message"
# Boolean flags (no arguments)
complete -c $cmd -s S -l setup -d "Run setup for all reconfigurable parts of fabric"
complete -c $cmd -s s -l stream -d "Stream"
complete -c $cmd -s r -l raw -d "Use the defaults of the model without sending chat options"
complete -c $cmd -s l -l listpatterns -d "List all patterns"
complete -c $cmd -s L -l listmodels -d "List all available models"
complete -c $cmd -s x -l listcontexts -d "List all contexts"
complete -c $cmd -s X -l listsessions -d "List all sessions"
complete -c $cmd -s U -l updatepatterns -d "Update patterns"
complete -c $cmd -s c -l copy -d "Copy to clipboard"
complete -c $cmd -l output-session -d "Output the entire session to the output file"
complete -c $cmd -s d -l changeDefaultModel -d "Change default model"
complete -c $cmd -l playlist -d "Prefer playlist over video if both ids are present in the URL"
complete -c $cmd -l transcript -d "Grab transcript from YouTube video and send to chat"
complete -c $cmd -l transcript-with-timestamps -d "Grab transcript from YouTube video with timestamps"
complete -c $cmd -l comments -d "Grab comments from YouTube video and send to chat"
complete -c $cmd -l metadata -d "Output video metadata"
complete -c $cmd -l yt-dlp-args -d "Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')"
complete -c $cmd -l readability -d "Convert HTML input into a clean, readable view"
complete -c $cmd -l input-has-vars -d "Apply variables to user input"
complete -c $cmd -l dry-run -d "Show what would be sent to the model without actually sending it"
complete -c $cmd -l search -d "Enable web search tool for supported models (Anthropic, OpenAI, Gemini)"
complete -c $cmd -l serve -d "Serve the Fabric Rest API"
complete -c $cmd -l serveOllama -d "Serve the Fabric Rest API with ollama endpoints"
complete -c $cmd -l version -d "Print current version"
complete -c $cmd -l listextensions -d "List all registered extensions"
complete -c $cmd -l liststrategies -d "List all strategies"
complete -c $cmd -l listvendors -d "List all vendors"
complete -c $cmd -l list-gemini-voices -d "List all available Gemini TTS voices"
complete -c $cmd -l shell-complete-list -d "Output raw list without headers/formatting (for shell completion)"
complete -c $cmd -l suppress-think -d "Suppress text enclosed in thinking tags"
complete -c $cmd -l disable-responses-api -d "Disable OpenAI Responses API (default: false)"
complete -c $cmd -l notification -d "Send desktop notification when command completes"
complete -c $cmd -s h -l help -d "Show this help message"
end

__fabric_register_completions fabric
__fabric_register_completions fabric-ai
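
# Usage sketch: fish auto-loads completions from ~/.config/fish/completions, so
# saving this file there as fabric.fish (or fabric-ai.fish for that command name)
# is enough; no shell restart is required:
#   cp fabric.fish ~/.config/fish/completions/fabric.fish
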
503 completions/setup-completions.sh (Executable file)
@@ -0,0 +1,503 @@
|
||||
#!/bin/sh
|
||||
|
||||
# Fabric Shell Completions Setup Script
|
||||
# This script automatically installs shell completions for the fabric CLI
|
||||
# based on your current shell and the installed fabric command name.
|
||||
|
||||
set -e
|
||||
|
||||
# Global variables
|
||||
DRY_RUN=false
|
||||
# Base URL to fetch completion files when not available locally
|
||||
# Can be overridden via environment variable FABRIC_COMPLETIONS_BASE_URL
|
||||
FABRIC_COMPLETIONS_BASE_URL="${FABRIC_COMPLETIONS_BASE_URL:-https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions}"
|
||||
TEMP_DIR=""
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
CYAN='\033[0;36m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Function to print colored output
|
||||
print_info() {
|
||||
printf "${BLUE}[INFO]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_success() {
|
||||
printf "${GREEN}[SUCCESS]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
printf "${YELLOW}[WARNING]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
printf "${RED}[ERROR]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_dry_run() {
|
||||
printf "${CYAN}[DRY-RUN]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
# Function to execute commands with dry-run support
|
||||
execute_command() {
|
||||
cmd="$1"
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_dry_run "Would run: $cmd"
|
||||
return 0
|
||||
else
|
||||
eval "$cmd" 2>/dev/null
|
||||
fi
|
||||
}
|
||||
|
||||
# Simple downloader that prefers curl, falls back to wget
|
||||
to_github_raw_url() {
|
||||
in_url="$1"
|
||||
case "$in_url" in
|
||||
https://github.com/*/*/blob/*)
|
||||
# Convert blob URL to raw
|
||||
# https://github.com/{owner}/{repo}/blob/{ref}/path -> https://raw.githubusercontent.com/{owner}/{repo}/{ref}/path
|
||||
echo "$in_url" | sed -E 's#https://github.com/([^/]+)/([^/]+)/blob/([^/]+)/#https://raw.githubusercontent.com/\1/\2/\3/#'
|
||||
;;
|
||||
https://github.com/*/*/tree/*)
|
||||
# Convert tree URL base + file path to raw
|
||||
# https://github.com/{owner}/{repo}/tree/{ref}/path -> https://raw.githubusercontent.com/{owner}/{repo}/{ref}/path
|
||||
echo "$in_url" | sed -E 's#https://github.com/([^/]+)/([^/]+)/tree/([^/]+)/#https://raw.githubusercontent.com/\1/\2/\3/#'
|
||||
;;
|
||||
*)
|
||||
echo "$in_url"
|
||||
;;
|
||||
esac
|
||||
}
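
# Illustration of the rewrite performed above (URL is an example only):
#   to_github_raw_url "https://github.com/danielmiessler/Fabric/blob/main/completions/_fabric"
#   -> https://raw.githubusercontent.com/danielmiessler/Fabric/main/completions/_fabric
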
|
||||
|
||||
# Simple downloader that prefers curl, falls back to wget
|
||||
download_file() {
|
||||
url="$1"
|
||||
dest="$2"
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_dry_run "Would download: $url -> $dest"
|
||||
return 0
|
||||
fi
|
||||
|
||||
eff_url="$(to_github_raw_url "$url")"
|
||||
|
||||
if command -v curl >/dev/null 2>&1; then
|
||||
curl -fsSL "$eff_url" -o "$dest"
|
||||
return $?
|
||||
elif command -v wget >/dev/null 2>&1; then
|
||||
wget -q "$eff_url" -O "$dest"
|
||||
return $?
|
||||
else
|
||||
print_error "Neither 'curl' nor 'wget' is available to download: $url"
|
||||
return 1
|
||||
fi
|
||||
}
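
# Example call (mirrors how obtain_completion_files uses this helper below):
#   download_file "$FABRIC_COMPLETIONS_BASE_URL/_fabric" "$TEMP_DIR/_fabric"
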
|
||||
|
||||
# Attempt to obtain completion files. If local copies are missing,
|
||||
# download them into a temporary directory and return that directory path.
|
||||
obtain_completion_files() {
|
||||
obf_script_dir="$1"
|
||||
obf_need_download=false
|
||||
|
||||
if [ ! -f "$obf_script_dir/_fabric" ] || [ ! -f "$obf_script_dir/fabric.bash" ] || [ ! -f "$obf_script_dir/fabric.fish" ]; then
|
||||
obf_need_download=true
|
||||
fi
|
||||
|
||||
if [ "$obf_need_download" = false ]; then
|
||||
echo "$obf_script_dir"
|
||||
return 0
|
||||
fi
|
||||
|
||||
# Note: write only to stderr in this function except for the final echo which returns the path
|
||||
printf "%s\n" "[INFO] Local completion files not found; will download from GitHub." 1>&2
|
||||
printf "%s\n" "[INFO] Source: $FABRIC_COMPLETIONS_BASE_URL" 1>&2
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
printf "%s\n" "[DRY-RUN] Would create temporary directory for downloads" 1>&2
|
||||
echo "$obf_script_dir" # Keep using original for dry-run copies
|
||||
return 0
|
||||
fi
|
||||
|
||||
TEMP_DIR="$(mktemp -d 2>/dev/null || mktemp -d -t fabric-completions)"
|
||||
if [ ! -d "$TEMP_DIR" ]; then
|
||||
print_error "Failed to create temporary directory for downloads."
|
||||
return 1
|
||||
fi
|
||||
|
||||
if ! download_file "$FABRIC_COMPLETIONS_BASE_URL/_fabric" "$TEMP_DIR/_fabric"; then
|
||||
print_error "Failed to download _fabric"
|
||||
return 1
|
||||
fi
|
||||
if [ ! -s "$TEMP_DIR/_fabric" ] || head -n1 "$TEMP_DIR/_fabric" | grep -qi "^<!DOCTYPE\|^<html"; then
|
||||
print_error "Downloaded _fabric appears invalid (empty or HTML). Check FABRIC_COMPLETIONS_BASE_URL."
|
||||
return 1
|
||||
fi
|
||||
if ! download_file "$FABRIC_COMPLETIONS_BASE_URL/fabric.bash" "$TEMP_DIR/fabric.bash"; then
|
||||
print_error "Failed to download fabric.bash"
|
||||
return 1
|
||||
fi
|
||||
if [ ! -s "$TEMP_DIR/fabric.bash" ] || head -n1 "$TEMP_DIR/fabric.bash" | grep -qi "^<!DOCTYPE\|^<html"; then
|
||||
print_error "Downloaded fabric.bash appears invalid (empty or HTML). Check FABRIC_COMPLETIONS_BASE_URL."
|
||||
return 1
|
||||
fi
|
||||
if ! download_file "$FABRIC_COMPLETIONS_BASE_URL/fabric.fish" "$TEMP_DIR/fabric.fish"; then
|
||||
print_error "Failed to download fabric.fish"
|
||||
return 1
|
||||
fi
|
||||
if [ ! -s "$TEMP_DIR/fabric.fish" ] || head -n1 "$TEMP_DIR/fabric.fish" | grep -qi "^<!DOCTYPE\|^<html"; then
|
||||
print_error "Downloaded fabric.fish appears invalid (empty or HTML). Check FABRIC_COMPLETIONS_BASE_URL."
|
||||
return 1
|
||||
fi
|
||||
|
||||
echo "$TEMP_DIR"
|
||||
}
|
||||
|
||||
# Ensure directory exists, try sudo on permission failure
|
||||
ensure_dir() {
|
||||
dir="$1"
|
||||
# Expand ~ if present
|
||||
case "$dir" in
|
||||
~/*)
|
||||
dir="$HOME${dir#~}"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ -d "$dir" ]; then
|
||||
return 0
|
||||
fi
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_dry_run "Would run: mkdir -p \"$dir\""
|
||||
print_dry_run "If permission denied, would run: sudo mkdir -p \"$dir\""
|
||||
return 0
|
||||
fi
|
||||
|
||||
if mkdir -p "$dir" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
if command -v sudo >/dev/null 2>&1 && sudo mkdir -p "$dir" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
print_error "Failed to create directory: $dir"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Copy file with sudo fallback on permission failure
|
||||
install_file() {
|
||||
src="$1"
|
||||
dest="$2"
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_dry_run "Would run: cp \"$src\" \"$dest\""
|
||||
print_dry_run "If permission denied, would run: sudo cp \"$src\" \"$dest\""
|
||||
return 0
|
||||
fi
|
||||
|
||||
if cp "$src" "$dest" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
if command -v sudo >/dev/null 2>&1 && sudo cp "$src" "$dest" 2>/dev/null; then
|
||||
return 0
|
||||
fi
|
||||
print_error "Failed to install file to: $dest"
|
||||
return 1
|
||||
}
|
||||
|
||||
# Function to detect fabric command name
|
||||
detect_fabric_command() {
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
echo "fabric"
|
||||
elif command -v fabric-ai >/dev/null 2>&1; then
|
||||
echo "fabric-ai"
|
||||
else
|
||||
print_error "Neither 'fabric' nor 'fabric-ai' command found in PATH"
|
||||
exit 1
|
||||
fi
|
||||
}
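
# Typical use, as in main below:
#   fabric_cmd="$(detect_fabric_command)"   # -> "fabric" or "fabric-ai"
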
|
||||
|
||||
# Function to detect shell
|
||||
detect_shell() {
|
||||
if [ -n "$SHELL" ]; then
|
||||
basename "$SHELL"
|
||||
else
|
||||
print_warning "SHELL environment variable not set, defaulting to sh"
|
||||
echo "sh"
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to get script directory
|
||||
get_script_dir() {
|
||||
# Get the directory where this script is located
|
||||
script_path="$(readlink -f "$0" 2>/dev/null || realpath "$0" 2>/dev/null || echo "$0")"
|
||||
dirname "$script_path"
|
||||
}
|
||||
|
||||
# Function to setup Zsh completions
|
||||
setup_zsh_completions() {
|
||||
fabric_cmd="$1"
|
||||
script_dir="$2"
|
||||
completion_file="_${fabric_cmd}"
|
||||
|
||||
print_info "Setting up Zsh completions for '$fabric_cmd'..."
|
||||
|
||||
# Try to use existing $fpath first, then fall back to default directories
|
||||
zsh_dirs=""
|
||||
|
||||
# Check if user's shell is zsh and try to get fpath from it
|
||||
if [ "$(basename "$SHELL")" = "zsh" ] && command -v zsh >/dev/null 2>&1; then
|
||||
# Get fpath from zsh by sourcing user's .zshrc first
|
||||
fpath_output=$(zsh -c "source \$HOME/.zshrc 2>/dev/null && print -l \$fpath" 2>/dev/null | head -5 | tr '\n' ' ')
|
||||
if [ -n "$fpath_output" ] && [ "$fpath_output" != "" ]; then
|
||||
print_info "Using directories from zsh \$fpath"
|
||||
zsh_dirs="$fpath_output"
|
||||
fi
|
||||
fi
|
||||
|
||||
# If we couldn't get fpath or it's empty, use default directories
|
||||
if [ -z "$zsh_dirs" ] || [ "$zsh_dirs" = "" ]; then
|
||||
print_info "Using default zsh completion directories"
|
||||
zsh_dirs="/usr/local/share/zsh/site-functions /opt/homebrew/share/zsh/site-functions /usr/share/zsh/site-functions ~/.local/share/zsh/site-functions"
|
||||
fi
|
||||
|
||||
installed=false
|
||||
|
||||
for dir in $zsh_dirs; do
|
||||
# Create directory (with sudo fallback if needed)
|
||||
if ensure_dir "$dir"; then
|
||||
if install_file "$script_dir/_fabric" "$dir/$completion_file"; then
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_success "Would install Zsh completion to: $dir/$completion_file"
|
||||
else
|
||||
print_success "Installed Zsh completion to: $dir/$completion_file"
|
||||
fi
|
||||
installed=true
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$installed" = false ]; then
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_warning "Would attempt to install Zsh completions but no writable directory found."
|
||||
else
|
||||
print_error "Failed to install Zsh completions. Try running with sudo or check permissions."
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_info "Would suggest: Restart your shell or run 'autoload -U compinit && compinit' to enable completions."
|
||||
else
|
||||
print_info "Restart your shell or run 'autoload -U compinit && compinit' to enable completions."
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to setup Bash completions
|
||||
setup_bash_completions() {
|
||||
fabric_cmd="$1"
|
||||
script_dir="$2"
|
||||
completion_file="${fabric_cmd}.bash"
|
||||
|
||||
print_info "Setting up Bash completions for '$fabric_cmd'..."
|
||||
|
||||
# Try different completion directories
|
||||
bash_dirs="/etc/bash_completion.d /usr/local/etc/bash_completion.d /opt/homebrew/etc/bash_completion.d ~/.local/share/bash-completion/completions"
|
||||
installed=false
|
||||
|
||||
for dir in $bash_dirs; do
|
||||
if ensure_dir "$dir"; then
|
||||
if install_file "$script_dir/fabric.bash" "$dir/$completion_file"; then
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_success "Would install Bash completion to: $dir/$completion_file"
|
||||
else
|
||||
print_success "Installed Bash completion to: $dir/$completion_file"
|
||||
fi
|
||||
installed=true
|
||||
break
|
||||
fi
|
||||
fi
|
||||
done
|
||||
|
||||
if [ "$installed" = false ]; then
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_warning "Would attempt to install Bash completions but no writable directory found."
|
||||
else
|
||||
print_error "Failed to install Bash completions. Try running with sudo or check permissions."
|
||||
return 1
|
||||
fi
|
||||
fi
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_info "Would suggest: Restart your shell or run 'source ~/.bashrc' to enable completions."
|
||||
else
|
||||
print_info "Restart your shell or run 'source ~/.bashrc' to enable completions."
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to setup Fish completions
|
||||
setup_fish_completions() {
|
||||
fabric_cmd="$1"
|
||||
script_dir="$2"
|
||||
completion_file="${fabric_cmd}.fish"
|
||||
|
||||
print_info "Setting up Fish completions for '$fabric_cmd'..."
|
||||
|
||||
# Fish completion directory
|
||||
fish_dir="$HOME/.config/fish/completions"
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_dry_run "Would run: mkdir -p \"$fish_dir\""
|
||||
print_dry_run "Would run: cp \"$script_dir/fabric.fish\" \"$fish_dir/$completion_file\""
|
||||
print_success "Would install Fish completion to: $fish_dir/$completion_file"
|
||||
print_info "Fish will automatically load the completions (no restart needed)."
|
||||
elif mkdir -p "$fish_dir" 2>/dev/null; then
|
||||
if cp "$script_dir/fabric.fish" "$fish_dir/$completion_file"; then
|
||||
print_success "Installed Fish completion to: $fish_dir/$completion_file"
|
||||
print_info "Fish will automatically load the completions (no restart needed)."
|
||||
else
|
||||
print_error "Failed to copy Fish completion file."
|
||||
return 1
|
||||
fi
|
||||
else
|
||||
print_error "Failed to create Fish completions directory: $fish_dir"
|
||||
return 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Function to setup completions for other shells
|
||||
setup_other_shell_completions() {
|
||||
fabric_cmd="$1"
|
||||
shell_name="$2"
|
||||
script_dir="$3"
|
||||
|
||||
print_warning "Shell '$shell_name' is not directly supported."
|
||||
print_info "You can manually source the completion files:"
|
||||
print_info " Bash-compatible: source $script_dir/fabric.bash"
|
||||
print_info " Zsh-compatible: source $script_dir/_fabric"
|
||||
}
|
||||
|
||||
# Function to show help
|
||||
show_help() {
|
||||
cat << EOF
|
||||
Fabric Shell Completions Setup Script
|
||||
|
||||
USAGE:
|
||||
setup-completions.sh [OPTIONS]
|
||||
|
||||
OPTIONS:
|
||||
--dry-run Show what commands would be run without executing them
|
||||
--help Show this help message
|
||||
|
||||
DESCRIPTION:
|
||||
This script automatically installs shell completions for the fabric CLI
|
||||
based on your current shell and the installed fabric command name.
|
||||
|
||||
The script will use completion files from the same directory as the script
|
||||
when available. If they are not present (e.g., when running via curl), it
|
||||
will download them from GitHub:
|
||||
|
||||
$FABRIC_COMPLETIONS_BASE_URL
|
||||
|
||||
You can override the download source by setting
|
||||
FABRIC_COMPLETIONS_BASE_URL to your preferred location.
|
||||
|
||||
Supports: zsh, bash, fish
|
||||
|
||||
The script will:
|
||||
1. Detect whether 'fabric' or 'fabric-ai' is installed
|
||||
2. Detect your current shell from the SHELL environment variable
|
||||
3. Install the appropriate completion file with the correct name
|
||||
4. Try multiple standard completion directories
|
||||
|
||||
EXAMPLES:
|
||||
./setup-completions.sh # Install completions
|
||||
./setup-completions.sh --dry-run # Show what would be done
|
||||
FABRIC_COMPLETIONS_BASE_URL="https://raw.githubusercontent.com/<owner>/<repo>/main/completions" \\
|
||||
./setup-completions.sh # Override download source
|
||||
./setup-completions.sh --help # Show this help
|
||||
|
||||
EOF
|
||||
}
|
||||
|
||||
# Main function
|
||||
main() {
|
||||
# Parse command line arguments
|
||||
while [ $# -gt 0 ]; do
|
||||
case "$1" in
|
||||
--dry-run)
|
||||
DRY_RUN=true
|
||||
shift
|
||||
;;
|
||||
--help|-h)
|
||||
show_help
|
||||
exit 0
|
||||
;;
|
||||
*)
|
||||
print_error "Unknown option: $1"
|
||||
print_info "Use --help for usage information."
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
done
|
||||
|
||||
print_info "Fabric Shell Completions Setup"
|
||||
print_info "==============================="
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_info "DRY RUN MODE - Commands will be shown but not executed"
|
||||
print_info ""
|
||||
fi
|
||||
|
||||
# Get script directory and obtain completion files (local or downloaded)
|
||||
script_dir="$(get_script_dir)"
|
||||
script_dir="$(obtain_completion_files "$script_dir" || echo "")"
|
||||
if [ -z "$script_dir" ]; then
|
||||
print_error "Unable to obtain completion files. Aborting."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# If we downloaded into a temp dir, arrange cleanup at process exit
|
||||
if [ -n "$TEMP_DIR" ] && [ -d "$TEMP_DIR" ]; then
|
||||
trap 'if [ -n "$TEMP_DIR" ] && [ -d "$TEMP_DIR" ]; then rm -rf "$TEMP_DIR"; fi' EXIT INT TERM
|
||||
fi
|
||||
|
||||
# Detect fabric command
|
||||
fabric_cmd="$(detect_fabric_command)"
|
||||
print_info "Detected fabric command: $fabric_cmd"
|
||||
|
||||
# Detect shell
|
||||
shell_name="$(detect_shell)"
|
||||
print_info "Detected shell: $shell_name"
|
||||
|
||||
# Setup completions based on shell
|
||||
case "$shell_name" in
|
||||
zsh)
|
||||
setup_zsh_completions "$fabric_cmd" "$script_dir"
|
||||
;;
|
||||
bash)
|
||||
setup_bash_completions "$fabric_cmd" "$script_dir"
|
||||
;;
|
||||
fish)
|
||||
setup_fish_completions "$fabric_cmd" "$script_dir"
|
||||
;;
|
||||
*)
|
||||
setup_other_shell_completions "$fabric_cmd" "$shell_name" "$script_dir"
|
||||
;;
|
||||
esac
|
||||
|
||||
if [ "$DRY_RUN" = true ]; then
|
||||
print_success "Dry-run completed! The above commands would set up shell completions."
|
||||
print_info "Run without --dry-run to actually install the completions."
|
||||
else
|
||||
print_success "Shell completion setup completed!"
|
||||
print_info "You can now use tab completion with the '$fabric_cmd' command."
|
||||
fi
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -167,6 +167,8 @@ us the results in
Select the model to use. NOTE: Will not work if you
have set a default model. please use --clear to clear
persistence before using this flag
--vendor VENDOR, -V VENDOR
Specify vendor for the selected model (e.g., -V "LM Studio" -m openai/gpt-oss-20b)
--listmodels List all available models
--remoteOllamaServer REMOTEOLLAMASERVER
The URL of the remote ollamaserver to use. ONLY USE
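A hedged example of the new flag in practice, combining the vendor and model shown in the help text above (the pattern name is illustrative):

    fabric -V "LM Studio" -m openai/gpt-oss-20b -p summarize < notes.txt
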
@@ -1,23 +1,128 @@
# IDENTITY and PURPOSE
You are an AI assistant tasked with creating a new feature for a fabric command-line tool. Your primary responsibility is to develop a pattern that suggests appropriate fabric patterns or commands based on user input. You are knowledgeable about fabric commands and understand the need to expand the tool's functionality. Your role involves analyzing user requests, determining the most suitable fabric commands or patterns, and providing helpful suggestions to users.

You are an expert AI assistant specialized in the Fabric framework - an open-source tool for augmenting human capabilities with AI. Your primary responsibility is to analyze user requests and suggest the most appropriate fabric patterns or commands to accomplish their goals. You have comprehensive knowledge of all available patterns, their categories, capabilities, and use cases.

Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.

# STEPS
- Analyze the user's input to understand their specific needs and context
- Determine the appropriate fabric pattern or command based on the user's request
- Generate a response that suggests the relevant fabric command(s) or pattern(s)
- Provide explanations or multiple options when applicable
- If no specific command is found, suggest using `create_pattern`

## 1. ANALYZE USER INPUT

- Parse the user's request to understand their primary objective
- Identify the type of content they're working with (text, code, data, etc.)
- Determine the desired output format or outcome
- Consider the user's level of expertise with fabric

## 2. CATEGORIZE THE REQUEST

Match the request to one or more of these primary categories:

- **AI** - AI-related patterns for model guidance, art prompts, evaluation
- **ANALYSIS** - Analysis and evaluation of content, data, claims, debates
- **BILL** - Legislative bill analysis and implications
- **BUSINESS** - Business strategy, agreements, sales, presentations
- **CLASSIFICATION** - Content categorization and tagging
- **CONVERSION** - Format conversion between different data types
- **CR THINKING** - Critical thinking, logical analysis, bias detection
- **CREATIVITY** - Creative writing, essay generation, artistic content
- **DEVELOPMENT** - Software development, coding, project design
- **DEVOPS** - Infrastructure, deployment, pipeline management
- **EXTRACT** - Information extraction from various content types
- **GAMING** - RPG, D&D, gaming-related content creation
- **LEARNING** - Educational content, tutorials, explanations
- **OTHER** - Miscellaneous patterns that don't fit other categories
- **RESEARCH** - Academic research, paper analysis, investigation
- **REVIEW** - Evaluation and review of content, code, designs
- **SECURITY** - Cybersecurity analysis, threat modeling, vulnerability assessment
- **SELF** - Personal development, guidance, self-improvement
- **STRATEGY** - Strategic analysis, planning, decision-making
- **SUMMARIZE** - Content summarization at various levels of detail
- **VISUALIZE** - Data visualization, diagrams, charts, graphics
- **WISDOM** - Wisdom extraction, insights, life lessons
- **WRITING** - Writing assistance, improvement, formatting

## 3. SUGGEST APPROPRIATE PATTERNS

- Recommend 1-3 most suitable patterns based on the analysis
- Prioritize patterns that directly address the user's main objective
- Consider alternative patterns for different approaches to the same goal
- Include both primary and secondary pattern suggestions when relevant

## 4. PROVIDE CONTEXT AND USAGE

- Explain WHY each suggested pattern is appropriate
- Include the exact fabric command syntax
- Mention any important considerations or limitations
- Suggest complementary patterns if applicable

# OUTPUT INSTRUCTIONS

- Only output Markdown
- Provide suggestions for fabric commands or patterns based on the user's input
- Include explanations or multiple options when appropriate
- If suggesting `create_pattern`, include instructions for saving and using the new pattern
- Format the output to be clear and easy to understand for users new to fabric
- Ensure the response aligns with the goal of making fabric more accessible and user-friendly
- Ensure you follow ALL these instructions when creating your output
- Structure your response with clear headings and sections
- Provide specific fabric command examples: `fabric --pattern pattern_name`
- Include brief explanations of what each pattern does
- If multiple patterns could work, rank them by relevance
- For complex requests, suggest a workflow using multiple patterns
- If no existing pattern fits perfectly, suggest `create_pattern` with specific guidance
- Format the output to be actionable and easy to follow
- Ensure suggestions align with making fabric more accessible and powerful

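For illustration, a minimal invocation of this pattern once it is installed (the piped question is just an example):

```sh
echo "I need to turn a long meeting transcript into action items" | fabric --pattern suggest_pattern
```
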
# PATTERN MATCHING GUIDELINES
|
||||
|
||||
## Common Request Types and Best Patterns
|
||||
|
||||
**AI**: ai, create_art_prompt, create_pattern, extract_mcp_servers, extract_wisdom_agents, generate_code_rules, improve_prompt, judge_output, rate_ai_response, rate_ai_result, raw_query, solve_with_cot, suggest_pattern, summarize_prompt
|
||||
|
||||
**ANALYSIS**: ai, analyze_answers, analyze_bill, analyze_bill_short, analyze_candidates, analyze_cfp_submission, analyze_claims, analyze_comments, analyze_debate, analyze_email_headers, analyze_incident, analyze_interviewer_techniques, analyze_logs, analyze_malware, analyze_military_strategy, analyze_mistakes, analyze_paper, analyze_paper_simple, analyze_patent, analyze_personality, analyze_presentation, analyze_product_feedback, analyze_proposition, analyze_prose, analyze_prose_json, analyze_prose_pinker, analyze_risk, analyze_sales_call, analyze_spiritual_text, analyze_tech_impact, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, apply_ul_tags, check_agreement, compare_and_contrast, create_ai_jobs_analysis, create_idea_compass, create_investigation_visualization, create_prediction_block, create_recursive_outline, create_tags, dialog_with_socrates, extract_main_idea, extract_predictions, find_hidden_message, find_logical_fallacies, get_wow_per_minute, identify_dsrp_distinctions, identify_dsrp_perspectives, identify_dsrp_relationships, identify_dsrp_systems, identify_job_stories, label_and_rate, prepare_7s_strategy, provide_guidance, rate_content, rate_value, recommend_artists, recommend_talkpanel_topics, review_design, summarize_board_meeting, t_analyze_challenge_handling, t_check_dunning_kruger, t_check_metrics, t_describe_life_outlook, t_extract_intro_sentences, t_extract_panel_topics, t_find_blindspots, t_find_negative_thinking, t_red_team_thinking, t_threat_model_plans, t_year_in_review, write_hackerone_report
|
||||
|
||||
**BILL**: analyze_bill, analyze_bill_short
|
||||
|
||||
**BUSINESS**: check_agreement, create_ai_jobs_analysis, create_formal_email, create_hormozi_offer, create_loe_document, create_logo, create_newsletter_entry, create_prd, explain_project, extract_business_ideas, extract_product_features, extract_skills, extract_sponsors, identify_job_stories, prepare_7s_strategy, rate_value, t_check_metrics, t_create_h3_career, t_visualize_mission_goals_projects, t_year_in_review, transcribe_minutes
|
||||
|
||||
**CLASSIFICATION**: apply_ul_tags
|
||||
|
||||
**CONVERSION**: clean_text, convert_to_markdown, create_graph_from_input, export_data_as_csv, extract_videoid, get_youtube_rss, humanize, md_callout, sanitize_broken_html_to_markdown, to_flashcards, transcribe_minutes, translate, tweet, write_latex
|
||||
|
||||
**CR THINKING**: capture_thinkers_work, create_idea_compass, create_markmap_visualization, dialog_with_socrates, extract_alpha, extract_controversial_ideas, extract_extraordinary_claims, extract_predictions, extract_primary_problem, extract_wisdom_nometa, find_hidden_message, find_logical_fallacies, solve_with_cot, summarize_debate, t_analyze_challenge_handling, t_check_dunning_kruger, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking
|
||||
|
||||
**CREATIVITY**: create_mnemonic_phrases, write_essay
|
||||
|
||||
**DEVELOPMENT**: agility_story, analyze_prose_json, answer_interview_question, ask_secure_by_design_questions, ask_uncle_duke, coding_master, create_coding_feature, create_coding_project, create_command, create_design_document, create_git_diff_commit, create_mermaid_visualization, create_mermaid_visualization_for_github, create_pattern, create_sigma_rules, create_user_story, explain_code, explain_docs, export_data_as_csv, extract_algorithm_update_recommendations, extract_mcp_servers, extract_poc, generate_code_rules, get_youtube_rss, improve_prompt, official_pattern_template, recommend_pipeline_upgrades, refine_design_document, review_code, review_design, sanitize_broken_html_to_markdown, show_fabric_options_markmap, suggest_pattern, summarize_git_changes, summarize_git_diff, summarize_pull-requests, write_nuclei_template_rule, write_pull-request, write_semgrep_rule
|
||||
|
||||
**DEVOPS**: analyze_terraform_plan
|
||||
|
||||
**EXTRACT**: analyze_comments, create_aphorisms, create_tags, create_video_chapters, extract_algorithm_update_recommendations, extract_alpha, extract_article_wisdom, extract_book_ideas, extract_book_recommendations, extract_business_ideas, extract_controversial_ideas, extract_core_message, extract_ctf_writeup, extract_domains, extract_extraordinary_claims, extract_ideas, extract_insights, extract_insights_dm, extract_instructions, extract_jokes, extract_latest_video, extract_main_activities, extract_main_idea, extract_mcp_servers, extract_most_redeeming_thing, extract_patterns, extract_poc, extract_predictions, extract_primary_problem, extract_primary_solution, extract_product_features, extract_questions, extract_recipe, extract_recommendations, extract_references, extract_skills, extract_song_meaning, extract_sponsors, extract_videoid, extract_wisdom, extract_wisdom_agents, extract_wisdom_dm, extract_wisdom_nometa, extract_wisdom_short, generate_code_rules, t_extract_intro_sentences, t_extract_panel_topics
|
||||
|
||||
**GAMING**: create_npc, create_rpg_summary, summarize_rpg_session
|
||||
|
||||
**LEARNING**: analyze_answers, ask_uncle_duke, coding_master, create_diy, create_flash_cards, create_quiz, create_reading_plan, create_story_explanation, dialog_with_socrates, explain_code, explain_docs, explain_math, explain_project, explain_terms, extract_references, improve_academic_writing, provide_guidance, solve_with_cot, summarize_lecture, summarize_paper, to_flashcards, write_essay_pg
|
||||
|
||||
**OTHER**: extract_jokes
|
||||
|
||||
**RESEARCH**: analyze_candidates, analyze_claims, analyze_paper, analyze_paper_simple, analyze_patent, analyze_proposition, analyze_spiritual_text, analyze_tech_impact, capture_thinkers_work, create_academic_paper, extract_extraordinary_claims, extract_references, find_hidden_message, find_logical_fallacies, identify_dsrp_distinctions, identify_dsrp_perspectives, identify_dsrp_relationships, identify_dsrp_systems, improve_academic_writing, recommend_artists, summarize_paper, write_essay_pg, write_latex, write_micro_essay
|
||||
|
||||
**REVIEW**: analyze_cfp_submission, analyze_presentation, analyze_prose, get_wow_per_minute, judge_output, label_and_rate, rate_ai_response, rate_ai_result, rate_content, rate_value, review_code, review_design
|
||||
|
||||
**SECURITY**: analyze_email_headers, analyze_incident, analyze_logs, analyze_malware, analyze_risk, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, ask_secure_by_design_questions, create_command, create_cyber_summary, create_graph_from_input, create_investigation_visualization, create_network_threat_landscape, create_report_finding, create_security_update, create_sigma_rules, create_stride_threat_model, create_threat_scenarios, create_ttrc_graph, create_ttrc_narrative, extract_ctf_writeup, improve_report_finding, recommend_pipeline_upgrades, review_code, t_red_team_thinking, t_threat_model_plans, write_hackerone_report, write_nuclei_template_rule, write_semgrep_rule
|
||||
|
||||
**SELF**: create_better_frame, create_diy, create_reading_plan, dialog_with_socrates, extract_article_wisdom, extract_book_ideas, extract_book_recommendations, extract_insights, extract_insights_dm, extract_most_redeeming_thing, extract_recipe, extract_recommendations, extract_song_meaning, extract_wisdom, extract_wisdom_dm, extract_wisdom_short, find_female_life_partner, provide_guidance, t_check_dunning_kruger, t_create_h3_career, t_describe_life_outlook, t_find_neglected_goals, t_give_encouragement
|
||||
|
||||
**STRATEGY**: analyze_military_strategy, create_better_frame, prepare_7s_strategy, t_analyze_challenge_handling, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking, t_threat_model_plans, t_visualize_mission_goals_projects
|
||||
|
||||
**SUMMARIZE**: capture_thinkers_work, create_5_sentence_summary, create_micro_summary, create_newsletter_entry, create_show_intro, create_summary, extract_core_message, extract_latest_video, extract_main_idea, summarize, summarize_board_meeting, summarize_debate, summarize_git_changes, summarize_git_diff, summarize_lecture, summarize_legislation, summarize_meeting, summarize_micro, summarize_newsletter, summarize_paper, summarize_pull-requests, summarize_rpg_session, youtube_summary
|
||||
|
||||
**VISUALIZE**: create_excalidraw_visualization, create_graph_from_input, create_idea_compass, create_investigation_visualization, create_keynote, create_logo, create_markmap_visualization, create_mermaid_visualization, create_mermaid_visualization_for_github, create_video_chapters, create_visualization, enrich_blog_post, show_fabric_options_markmap, t_visualize_mission_goals_projects
|
||||
|
||||
**WISDOM**: extract_alpha, extract_article_wisdom, extract_book_ideas, extract_insights, extract_most_redeeming_thing, extract_recommendations, extract_wisdom, extract_wisdom_dm, extract_wisdom_nometa, extract_wisdom_short
|
||||
|
||||
**WRITING**: analyze_prose_json, analyze_prose_pinker, apply_ul_tags, clean_text, compare_and_contrast, convert_to_markdown, create_5_sentence_summary, create_academic_paper, create_aphorisms, create_better_frame, create_design_document, create_diy, create_formal_email, create_hormozi_offer, create_keynote, create_micro_summary, create_newsletter_entry, create_prediction_block, create_prd, create_show_intro, create_story_explanation, create_summary, create_tags, create_user_story, enrich_blog_post, explain_docs, explain_terms, humanize, improve_academic_writing, improve_writing, label_and_rate, md_callout, official_pattern_template, recommend_talkpanel_topics, refine_design_document, summarize, summarize_debate, summarize_lecture, summarize_legislation, summarize_meeting, summarize_micro, summarize_newsletter, summarize_paper, summarize_rpg_session, t_create_opening_sentences, t_describe_life_outlook, t_extract_intro_sentences, t_extract_panel_topics, t_give_encouragement, t_year_in_review, transcribe_minutes, tweet, write_essay, write_essay_pg, write_hackerone_report, write_latex, write_micro_essay, write_pull-request
|
||||
|
||||
## Workflow Suggestions
|
||||
|
||||
- For complex analysis: First use an extract pattern, then an analyze pattern, finally a summarize pattern
|
||||
- For content creation: Use relevant create_ patterns followed by improve_ patterns for refinement
|
||||
- For research projects: Combine extract_, analyze_, and summarize_ patterns in sequence
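For instance, the complex-analysis suggestion maps to a simple shell pipeline. The sketch below chains three patterns from the catalog above; the input file name is illustrative and the intermediate files are optional:

```bash
# Complex analysis: extract, then analyze, then summarize
cat article.txt | fabric --pattern extract_wisdom  > wisdom.md
cat wisdom.md   | fabric --pattern analyze_claims  > analysis.md
cat analysis.md | fabric --pattern create_summary  > summary.md
```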
|
||||
|
||||
# INPUT
|
||||
INPUT:
|
||||
|
||||
INPUT:
|
||||
|
||||
File diff suppressed because it is too large
@@ -1,919 +0,0 @@
|
||||
# Suggest Pattern
|
||||
|
||||
## OVERVIEW
|
||||
|
||||
What It Does: Fabric is an open-source framework designed to augment human capabilities using AI, making it easier to integrate AI into daily tasks.
|
||||
|
||||
Why People Use It: Users leverage Fabric to seamlessly apply AI for solving everyday challenges, enhancing productivity, and fostering human creativity through technology.
|
||||
|
||||
## HOW TO USE IT
|
||||
|
||||
Most Common Syntax: The most common usage involves executing Fabric commands in the terminal, such as `fabric --pattern <PATTERN_NAME>`.
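For piped input, which is how Fabric is most often used, that looks like the following minimal sketch (the file name is illustrative):

```bash
# Apply a pattern to any piped text
cat notes.txt | fabric --pattern summarize
```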
|
||||
|
||||
## COMMON USE CASES
|
||||
|
||||
For Summarizing Content: `fabric --pattern summarize`
|
||||
For Analyzing Claims: `fabric --pattern analyze_claims`
|
||||
For Extracting Wisdom from Videos: `fabric --pattern extract_wisdom`
|
||||
For creating custom patterns: `fabric --pattern create_pattern`
|
||||
|
||||
- One possible place to store them is ~/.config/custom-fabric-patterns.
|
||||
- Then when you want to use them, simply copy them into ~/.config/fabric/patterns.
|
||||
`cp -a ~/.config/custom-fabric-patterns/* ~/.config/fabric/patterns/`
|
||||
- Now you can run them with: `pbpaste | fabric -p your_custom_pattern`
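Put together, the custom-pattern workflow is roughly the sketch below. It assumes Fabric's usual pattern-directory layout, where each pattern is a folder containing a `system.md` prompt; re-run the copy step after updating patterns, since an update can overwrite the live patterns directory:

```bash
# 1. Keep custom patterns in their own directory
mkdir -p ~/.config/custom-fabric-patterns/your_custom_pattern
$EDITOR ~/.config/custom-fabric-patterns/your_custom_pattern/system.md   # write the prompt

# 2. Copy them into the live patterns directory
cp -a ~/.config/custom-fabric-patterns/* ~/.config/fabric/patterns/

# 3. Run them like any built-in pattern
pbpaste | fabric -p your_custom_pattern
```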
|
||||
|
||||
## MOST IMPORTANT AND USED OPTIONS AND FEATURES
|
||||
|
||||
- **--pattern PATTERN, -p PATTERN**: Specifies the pattern (prompt) to use. Useful for applying specific AI prompts to your input.
|
||||
|
||||
- **--stream, -s**: Streams results in real-time. Ideal for getting immediate feedback from AI operations.
|
||||
|
||||
- **--update, -u**: Updates patterns. Ensures you're using the latest AI prompts for your tasks.
|
||||
|
||||
- **--model MODEL, -m MODEL**: Selects the AI model to use. Allows customization of the AI backend for different tasks.
|
||||
|
||||
- **--setup, -S**: Sets up your Fabric instance. Essential for first-time users to configure Fabric correctly.
|
||||
|
||||
- **--list, -l**: Lists available patterns. Helps users discover new AI prompts for various applications.
|
||||
|
||||
- **--context, -C**: Uses a Context file to add context to your pattern. Enhances the relevance of AI responses by providing additional background information.
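These options can be combined on a single command line; for example (a sketch — the model and context names are illustrative):

```bash
# Stream a summary of the clipboard using a specific model and a context file
pbpaste | fabric --stream --pattern summarize --model gpt-4o --context project_background
```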
|
||||
|
||||
## PATTERNS
|
||||
|
||||
**Key pattern to use: `suggest_pattern`** - suggests appropriate fabric patterns or commands based on user input.
|
||||
|
||||
### agility_story
|
||||
|
||||
Generate a user story and acceptance criteria in JSON format based on the given topic.
|
||||
|
||||
### ai
|
||||
|
||||
Interpret questions deeply and provide concise, insightful answers in Markdown bullet points.
|
||||
|
||||
### analyze_answers
|
||||
|
||||
Evaluate quiz answers for correctness based on learning objectives and generated quiz questions.
|
||||
|
||||
### analyze_bill
|
||||
|
||||
Analyzes legislation to identify overt and covert goals, examining bills for hidden agendas and true intentions.
|
||||
|
||||
### analyze_bill_short
|
||||
|
||||
Provides a concise analysis of legislation, identifying overt and covert goals in a brief, structured format.
|
||||
|
||||
### analyze_candidates
|
||||
|
||||
Compare and contrast two political candidates based on key issues and policies.
|
||||
|
||||
### analyze_cfp_submission
|
||||
|
||||
Review and evaluate conference speaking session submissions based on clarity, relevance, depth, and engagement potential.
|
||||
|
||||
### analyze_claims
|
||||
|
||||
Analyse and rate truth claims with evidence, counter-arguments, fallacies, and final recommendations.
|
||||
|
||||
### analyze_comments
|
||||
|
||||
Evaluate internet comments for content, categorize sentiment, and identify reasons for praise, criticism, and neutrality.
|
||||
|
||||
### analyze_debate
|
||||
|
||||
Rate debates on insight, emotionality, and present an unbiased, thorough analysis of arguments, agreements, and disagreements.
|
||||
|
||||
### analyze_email_headers
|
||||
|
||||
Provide cybersecurity analysis and actionable insights on SPF, DKIM, DMARC, and ARC email header results.
|
||||
|
||||
### analyze_incident
|
||||
|
||||
Efficiently extract and organize key details from cybersecurity breach articles, focusing on attack type, vulnerable components, attacker and target info, incident details, and remediation steps.
|
||||
|
||||
### analyze_interviewer_techniques
|
||||
|
||||
Analyzes interviewer techniques, identifying the interviewer's unique qualities and succinctly articulating what makes them stand out, in a clear, simple format.
|
||||
|
||||
### analyze_logs
|
||||
|
||||
Analyse server log files to identify patterns, anomalies, and issues, providing data-driven insights and recommendations for improving server reliability and performance.
|
||||
|
||||
### analyze_malware
|
||||
|
||||
Analyse malware details, extract key indicators, techniques, and potential detection strategies, and summarize findings concisely for a malware analyst's use in identifying and responding to threats.
|
||||
|
||||
### analyze_military_strategy
|
||||
|
||||
Analyse a historical battle, offering in-depth insights into strategic decisions, strengths, weaknesses, tactical approaches, logistical factors, pivotal moments, and consequences for a comprehensive military evaluation.
|
||||
|
||||
### analyze_mistakes
|
||||
|
||||
Analyse past mistakes in thinking patterns, map them to current beliefs, and offer recommendations to improve accuracy in predictions.
|
||||
|
||||
### analyze_paper
|
||||
|
||||
Analyses research papers by summarizing findings, evaluating rigor, and assessing quality to provide insights for documentation and review.
|
||||
|
||||
### analyze_paper_simple
|
||||
|
||||
Analyzes academic papers with a focus on primary findings, research quality, and study design evaluation.
|
||||
|
||||
### analyze_patent
|
||||
|
||||
Analyse a patent's field, problem, solution, novelty, inventive step, and advantages in detail while summarizing and extracting keywords.
|
||||
|
||||
### analyze_personality
|
||||
|
||||
Performs a deep psychological analysis of a person in the input, focusing on their behavior, language, and psychological traits.
|
||||
|
||||
### analyze_presentation
|
||||
|
||||
Reviews and critiques presentations by analyzing the content, speaker's underlying goals, self-focus, and entertainment value.
|
||||
|
||||
### analyze_product_feedback
|
||||
|
||||
A prompt for analyzing and organizing user feedback by identifying themes, consolidating similar comments, and prioritizing them based on usefulness.
|
||||
|
||||
### analyze_proposition
|
||||
|
||||
Analyzes a ballot proposition by identifying its purpose, impact, arguments for and against, and relevant background information.
|
||||
|
||||
### analyze_prose
|
||||
|
||||
Evaluates writing for novelty, clarity, and prose, providing ratings, improvement recommendations, and an overall score.
|
||||
|
||||
### analyze_prose_json
|
||||
|
||||
Evaluates writing for novelty, clarity, prose, and provides ratings, explanations, improvement suggestions, and an overall score in a JSON format.
|
||||
|
||||
### analyze_prose_pinker
|
||||
|
||||
Evaluates prose based on Steven Pinker's The Sense of Style, analyzing writing style, clarity, and bad writing elements.
|
||||
|
||||
### analyze_risk
|
||||
|
||||
Conducts a risk assessment of a third-party vendor, assigning a risk score and suggesting security controls based on analysis of provided documents and vendor website.
|
||||
|
||||
### analyze_sales_call
|
||||
|
||||
Rates sales call performance across multiple dimensions, providing scores and actionable feedback based on transcript analysis.
|
||||
|
||||
### analyze_spiritual_text
|
||||
|
||||
Compares and contrasts spiritual texts by analyzing claims and differences with the King James Bible.
|
||||
|
||||
### analyze_tech_impact
|
||||
|
||||
Analyzes the societal impact, ethical considerations, and sustainability of technology projects, evaluating their outcomes and benefits.
|
||||
|
||||
### analyze_terraform_plan
|
||||
|
||||
Analyzes Terraform plan outputs to assess infrastructure changes, security risks, cost implications, and compliance considerations.
|
||||
|
||||
### analyze_threat_report
|
||||
|
||||
Extracts surprising insights, trends, statistics, quotes, references, and recommendations from cybersecurity threat reports, summarizing key findings and providing actionable information.
|
||||
|
||||
### analyze_threat_report_cmds
|
||||
|
||||
Extract and synthesize actionable cybersecurity commands from provided materials, incorporating command-line arguments and expert insights for pentesters and non-experts.
|
||||
|
||||
### analyze_threat_report_trends
|
||||
|
||||
Extract up to 50 surprising, insightful, and interesting trends from a cybersecurity threat report in markdown format.
|
||||
|
||||
### answer_interview_question
|
||||
|
||||
Generates concise, tailored responses to technical interview questions, incorporating alternative approaches and evidence to demonstrate the candidate's expertise and experience.
|
||||
|
||||
### ask_secure_by_design_questions
|
||||
|
||||
Generates a set of security-focused questions to ensure a project is built securely by design, covering key components and considerations.
|
||||
|
||||
### ask_uncle_duke
|
||||
|
||||
Coordinates a team of AI agents to research and produce multiple software development solutions based on provided specifications, and conducts detailed code reviews to ensure adherence to best practices.
|
||||
|
||||
### capture_thinkers_work
|
||||
|
||||
Analyze philosophers or philosophies and provide detailed summaries about their teachings, background, works, advice, and related concepts in a structured template.
|
||||
|
||||
### check_agreement
|
||||
|
||||
Analyze contracts and agreements to identify important stipulations, issues, and potential gotchas, then summarize them in Markdown.
|
||||
|
||||
### clean_text
|
||||
|
||||
Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
|
||||
|
||||
### coding_master
|
||||
|
||||
Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
|
||||
|
||||
### compare_and_contrast
|
||||
|
||||
Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
|
||||
|
||||
### convert_to_markdown
|
||||
|
||||
Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
|
||||
|
||||
### create_5_sentence_summary
|
||||
|
||||
Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
|
||||
|
||||
### create_academic_paper
|
||||
|
||||
Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
|
||||
|
||||
### create_ai_jobs_analysis
|
||||
|
||||
Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
|
||||
|
||||
### create_aphorisms
|
||||
|
||||
Find and generate a list of brief, witty statements.
|
||||
|
||||
### create_art_prompt
|
||||
|
||||
Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
|
||||
|
||||
### create_better_frame
|
||||
|
||||
Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
|
||||
|
||||
### create_coding_feature
|
||||
|
||||
Generates secure and composable code features using modern technology and best practices from project specifications.
|
||||
|
||||
### create_coding_project
|
||||
|
||||
Generate wireframes and starter code for any coding ideas that you have.
|
||||
|
||||
### create_command
|
||||
|
||||
Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
|
||||
|
||||
### create_cyber_summary
|
||||
|
||||
Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
|
||||
|
||||
### create_design_document
|
||||
|
||||
Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
|
||||
|
||||
### create_diy
|
||||
|
||||
Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
|
||||
|
||||
### create_excalidraw_visualization
|
||||
|
||||
Creates complex Excalidraw diagrams to visualize relationships between concepts and ideas in structured format.
|
||||
|
||||
### create_flash_cards
|
||||
|
||||
Creates flashcards for key concepts, definitions, and terms with question-answer format for educational purposes.
|
||||
|
||||
### create_formal_email
|
||||
|
||||
Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
|
||||
|
||||
### create_git_diff_commit
|
||||
|
||||
Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
|
||||
|
||||
### create_graph_from_input
|
||||
|
||||
Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
|
||||
|
||||
### create_hormozi_offer
|
||||
|
||||
Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
|
||||
|
||||
### create_idea_compass
|
||||
|
||||
Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
|
||||
|
||||
### create_investigation_visualization
|
||||
|
||||
Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
|
||||
|
||||
### create_keynote
|
||||
|
||||
Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
|
||||
|
||||
### create_loe_document
|
||||
|
||||
Creates detailed Level of Effort documents for estimating work effort, resources, and costs for tasks or projects.
|
||||
|
||||
### create_logo
|
||||
|
||||
Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
|
||||
|
||||
### create_markmap_visualization
|
||||
|
||||
Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
|
||||
|
||||
### create_mermaid_visualization
|
||||
|
||||
Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
|
||||
|
||||
### create_mermaid_visualization_for_github
|
||||
|
||||
Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
|
||||
|
||||
### create_micro_summary
|
||||
|
||||
Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
|
||||
|
||||
### create_mnemonic_phrases
|
||||
|
||||
Creates memorable mnemonic sentences from given words to aid in memory retention and learning.
|
||||
|
||||
### create_network_threat_landscape
|
||||
|
||||
Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
|
||||
|
||||
### create_newsletter_entry
|
||||
|
||||
Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
|
||||
|
||||
### create_npc
|
||||
|
||||
Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
|
||||
|
||||
### create_pattern
|
||||
|
||||
Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
|
||||
|
||||
### create_prd
|
||||
|
||||
Creates a precise Product Requirements Document (PRD) in Markdown based on input.
|
||||
|
||||
### create_prediction_block
|
||||
|
||||
Extracts and formats predictions from input into a structured Markdown block for a blog post.
|
||||
|
||||
### create_quiz
|
||||
|
||||
Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
|
||||
|
||||
### create_reading_plan
|
||||
|
||||
Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
|
||||
|
||||
### create_recursive_outline
|
||||
|
||||
Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
|
||||
|
||||
### create_report_finding
|
||||
|
||||
Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
|
||||
|
||||
### create_rpg_summary
|
||||
|
||||
Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
|
||||
|
||||
### create_security_update
|
||||
|
||||
Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
|
||||
|
||||
### create_show_intro
|
||||
|
||||
Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
|
||||
|
||||
### create_sigma_rules
|
||||
|
||||
Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
|
||||
|
||||
### create_story_explanation
|
||||
|
||||
Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
|
||||
|
||||
### create_stride_threat_model
|
||||
|
||||
Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
|
||||
|
||||
### create_summary
|
||||
|
||||
Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
|
||||
|
||||
### create_tags
|
||||
|
||||
Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
|
||||
|
||||
### create_threat_scenarios
|
||||
|
||||
Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
|
||||
|
||||
### create_ttrc_graph
|
||||
|
||||
Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
|
||||
|
||||
### create_ttrc_narrative
|
||||
|
||||
Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
|
||||
|
||||
### create_upgrade_pack
|
||||
|
||||
Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
|
||||
|
||||
### create_user_story
|
||||
|
||||
Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
|
||||
|
||||
### create_video_chapters
|
||||
|
||||
Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
|
||||
|
||||
### create_visualization
|
||||
|
||||
Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
|
||||
|
||||
### dialog_with_socrates
|
||||
|
||||
Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
|
||||
|
||||
### enrich_blog_post
|
||||
|
||||
Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
|
||||
|
||||
### explain_code
|
||||
|
||||
Explains code, security tool output, configuration text, and answers questions based on the provided input.
|
||||
|
||||
### explain_docs
|
||||
|
||||
Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
|
||||
|
||||
### explain_math
|
||||
|
||||
Helps you understand mathematical concepts in a clear and engaging way.
|
||||
|
||||
### explain_project
|
||||
|
||||
Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
|
||||
|
||||
### explain_terms
|
||||
|
||||
Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
|
||||
|
||||
### export_data_as_csv
|
||||
|
||||
Extracts and outputs all data structures from the input in properly formatted CSV data.
|
||||
|
||||
### extract_algorithm_update_recommendations
|
||||
|
||||
Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
|
||||
|
||||
### extract_article_wisdom
|
||||
|
||||
Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
|
||||
|
||||
### extract_book_ideas
|
||||
|
||||
Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
|
||||
|
||||
### extract_book_recommendations
|
||||
|
||||
Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
|
||||
|
||||
### extract_business_ideas
|
||||
|
||||
Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
|
||||
|
||||
### extract_controversial_ideas
|
||||
|
||||
Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
|
||||
|
||||
### extract_core_message
|
||||
|
||||
Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
|
||||
|
||||
### extract_ctf_writeup
|
||||
|
||||
Extracts a short write-up from a war-story-style text about a cybersecurity engagement.
|
||||
|
||||
### extract_domains
|
||||
|
||||
Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
|
||||
|
||||
### extract_extraordinary_claims
|
||||
|
||||
Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
|
||||
|
||||
### extract_ideas
|
||||
|
||||
Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
|
||||
|
||||
### extract_insights
|
||||
|
||||
Extracts and outputs the most powerful and insightful ideas from text as 16-word bullet points, grouped into IDEAS and INSIGHTS sections.
|
||||
|
||||
### extract_insights_dm
|
||||
|
||||
Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
|
||||
|
||||
### extract_instructions
|
||||
|
||||
Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
|
||||
|
||||
### extract_jokes
|
||||
|
||||
Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
|
||||
|
||||
### extract_latest_video
|
||||
|
||||
Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
|
||||
|
||||
### extract_main_activities
|
||||
|
||||
Extracts key events and activities from transcripts or logs, providing a summary of what happened.
|
||||
|
||||
### extract_main_idea
|
||||
|
||||
Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
|
||||
|
||||
### extract_most_redeeming_thing
|
||||
|
||||
Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
|
||||
|
||||
### extract_patterns
|
||||
|
||||
Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
|
||||
|
||||
### extract_poc
|
||||
|
||||
Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
|
||||
|
||||
### extract_predictions
|
||||
|
||||
Extracts predictions from input, including specific details such as date, confidence level, and verification method.
|
||||
|
||||
### extract_primary_problem
|
||||
|
||||
Extracts the primary problem with the world as presented in a given text or body of work.
|
||||
|
||||
### extract_primary_solution
|
||||
|
||||
Extracts the primary solution for the world as presented in a given text or body of work.
|
||||
|
||||
### extract_product_features
|
||||
|
||||
Extracts and outputs a list of product features from the provided input in a bulleted format.
|
||||
|
||||
### extract_questions
|
||||
|
||||
Extracts and outputs all questions asked by the interviewer in a conversation or interview.
|
||||
|
||||
### extract_recipe
|
||||
|
||||
Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
|
||||
|
||||
### extract_recommendations
|
||||
|
||||
Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
|
||||
|
||||
### extract_references
|
||||
|
||||
Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
|
||||
|
||||
### extract_skills
|
||||
|
||||
Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
|
||||
|
||||
### extract_song_meaning
|
||||
|
||||
Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
|
||||
|
||||
### extract_sponsors
|
||||
|
||||
Extracts and lists official sponsors and potential sponsors from a provided transcript.
|
||||
|
||||
### extract_videoid
|
||||
|
||||
Extracts and outputs the video ID from any given URL.
|
||||
|
||||
### extract_wisdom
|
||||
|
||||
Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
|
||||
|
||||
### extract_wisdom_agents
|
||||
|
||||
Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
|
||||
|
||||
### extract_wisdom_dm
|
||||
|
||||
Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
|
||||
|
||||
### extract_wisdom_nometa
|
||||
|
||||
Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
|
||||
|
||||
### find_female_life_partner
|
||||
|
||||
Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
|
||||
|
||||
### find_hidden_message
|
||||
|
||||
Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
|
||||
|
||||
### find_logical_fallacies
|
||||
|
||||
Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
|
||||
|
||||
### get_wow_per_minute
|
||||
|
||||
Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
|
||||
|
||||
### get_youtube_rss
|
||||
|
||||
Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
|
||||
|
||||
### humanize
|
||||
|
||||
Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
|
||||
|
||||
### identify_dsrp_distinctions
|
||||
|
||||
Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
|
||||
|
||||
### identify_dsrp_perspectives
|
||||
|
||||
Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
|
||||
|
||||
### identify_dsrp_relationships
|
||||
|
||||
Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
|
||||
|
||||
### identify_dsrp_systems
|
||||
|
||||
Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
|
||||
|
||||
### identify_job_stories
|
||||
|
||||
Identifies key job stories or requirements for roles.
|
||||
|
||||
### improve_academic_writing
|
||||
|
||||
Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
|
||||
|
||||
### improve_prompt
|
||||
|
||||
Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
|
||||
|
||||
### improve_report_finding
|
||||
|
||||
Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
|
||||
|
||||
### improve_writing
|
||||
|
||||
Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
|
||||
|
||||
### judge_output
|
||||
|
||||
Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
|
||||
|
||||
### label_and_rate
|
||||
|
||||
Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
|
||||
### md_callout
|
||||
|
||||
Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
|
||||
|
||||
### official_pattern_template
|
||||
|
||||
Template to use if you want to create new fabric patterns.
|
||||
|
||||
### prepare_7s_strategy
|
||||
|
||||
Prepares a comprehensive 7S strategy briefing document, capturing the organizational profile, strategic elements, and market dynamics in clear, concise, and organized content.
|
||||
|
||||
### provide_guidance
|
||||
|
||||
Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
|
||||
|
||||
### rate_ai_response
|
||||
|
||||
Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
|
||||
|
||||
### rate_ai_result
|
||||
|
||||
Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
|
||||
|
||||
### rate_content
|
||||
|
||||
Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
|
||||
### rate_value
|
||||
|
||||
Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
|
||||
|
||||
### raw_query
|
||||
|
||||
Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
|
||||
|
||||
### recommend_artists
|
||||
|
||||
Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
|
||||
|
||||
### recommend_pipeline_upgrades
|
||||
|
||||
Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
|
||||
|
||||
### recommend_talkpanel_topics
|
||||
|
||||
Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
|
||||
|
||||
### refine_design_document
|
||||
|
||||
Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
|
||||
|
||||
### review_design
|
||||
|
||||
Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
|
||||
|
||||
### sanitize_broken_html_to_markdown
|
||||
|
||||
Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
|
||||
|
||||
### show_fabric_options_markmap
|
||||
|
||||
Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
|
||||
|
||||
### solve_with_cot
|
||||
|
||||
Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
|
||||
|
||||
### suggest_pattern
|
||||
|
||||
Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
|
||||
|
||||
### summarize
|
||||
|
||||
Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
|
||||
|
||||
### summarize_board_meeting
|
||||
|
||||
Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
|
||||
|
||||
### summarize_debate
|
||||
|
||||
Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
|
||||
|
||||
### summarize_git_changes
|
||||
|
||||
Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
|
||||
|
||||
### summarize_git_diff
|
||||
|
||||
Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
|
||||
|
||||
### summarize_lecture
|
||||
|
||||
Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
|
||||
|
||||
### summarize_legislation
|
||||
|
||||
Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
|
||||
|
||||
### summarize_meeting
|
||||
|
||||
Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
|
||||
|
||||
### summarize_micro
|
||||
|
||||
Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
|
||||
|
||||
### summarize_newsletter
|
||||
|
||||
Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
|
||||
|
||||
### summarize_paper
|
||||
|
||||
Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
|
||||
|
||||
### summarize_prompt
|
||||
|
||||
Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
|
||||
|
||||
### summarize_pull-requests
|
||||
|
||||
Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
|
||||
|
||||
### summarize_rpg_session
|
||||
|
||||
Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
|
||||
|
||||
### t_analyze_challenge_handling
|
||||
|
||||
Provides eight 16-word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
|
||||
|
||||
### t_check_metrics
|
||||
|
||||
Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
|
||||
|
||||
### t_create_h3_career
|
||||
|
||||
Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
|
||||
|
||||
### t_create_opening_sentences
|
||||
|
||||
Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
|
||||
|
||||
### t_describe_life_outlook
|
||||
|
||||
Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
|
||||
|
||||
### t_extract_intro_sentences
|
||||
|
||||
Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
|
||||
|
||||
### t_extract_panel_topics
|
||||
|
||||
Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
|
||||
|
||||
### t_find_blindspots
|
||||
|
||||
Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
|
||||
|
||||
### t_find_negative_thinking
|
||||
|
||||
Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
|
||||
|
||||
### t_find_neglected_goals
|
||||
|
||||
Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
|
||||
|
||||
### t_give_encouragement
|
||||
|
||||
Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
|
||||
|
||||
### t_red_team_thinking
|
||||
|
||||
Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
|
||||
|
||||
### t_threat_model_plans
|
||||
|
||||
Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
|
||||
|
||||
### t_visualize_mission_goals_projects
|
||||
|
||||
Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
|
||||
|
||||
### t_year_in_review
|
||||
|
||||
Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
|
||||
|
||||
### to_flashcards
|
||||
|
||||
Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
|
||||
|
||||
### transcribe_minutes
|
||||
|
||||
Extracts meeting minutes from a meeting transcription, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
|
||||
|
||||
### translate
|
||||
|
||||
Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
|
||||
|
||||
### tweet
|
||||
|
||||
Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
|
||||
|
||||
### write_essay
|
||||
|
||||
Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
|
||||
|
||||
### write_essay_pg
|
||||
|
||||
Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
|
||||
|
||||
### write_hackerone_report
|
||||
|
||||
Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
|
||||
|
||||
### write_latex
|
||||
|
||||
Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
|
||||
|
||||
### write_micro_essay
|
||||
|
||||
Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
|
||||
|
||||
### write_nuclei_template_rule
|
||||
|
||||
Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
|
||||
|
||||
### write_pull-request
|
||||
|
||||
Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
|
||||
|
||||
### write_semgrep_rule
|
||||
|
||||
Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
|
||||
|
||||
### youtube_summary
|
||||
|
||||
Create concise, timestamped YouTube video summaries that highlight key points.
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 8 16-word bullets describing how well or poorly I'm addressing my challenges. Call me out if I'm not putting work into them, and/or if you can see evidence of them affecting me in my journal or elsewhere.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Check this person's Metrics or KPIs (M's or K's) to see their current state and if they've been improved recently.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Analyze everything in my TELOS file and think about what I could and should do after my legacy corporate / technical skills are automated away. What can I contribute that's based on human-to-human interaction and exchanges of value?
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 4 32-word bullets describing who I am and what I do in a non-douchey way. Use the who I am, the problem I see in the world, and what I'm doing about it as the template. Something like:
|
||||
a. I'm a programmer by trade, and one thing that really bothers me is kids being so stuck inside of tech and games. So I started a school where I teach kids to build things with their hands.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 5 16-word bullets describing this person's life outlook.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 5 16-word bullets describing who this person is, what they do, and what they're working on. The goal is to concisely and confidently project who they are while being humble and grounded.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 5 48-word bullet points, each including a 3-5 word panel title, that would be wonderful panels for this person to participate on.
|
||||
5. Write them so that they'd be good panels for others to participate in as well, not just me.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 8 16-word bullets describing possible blindspots in my thinking, i.e., flaws in my frames or models that might leave me exposed to error or risk.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 4 16-word bullets identifying negative thinking either in my main document or in my journal.
|
||||
5. Add some tough love encouragement (not fluff) to help get me out of that mindset.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 5 16-word bullets describing which of their goals and/or projects don't seem to have been worked on recently.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 8 16-word bullets looking at what I'm trying to do, and any progress I've made, and give some encouragement on the positive aspects and recommendations to continue the work.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 4 16-word bullets red-teaming my thinking, models, frames, etc, especially as evidenced throughout my journal.
|
||||
5. Give a set of recommendations on how to fix the issues identified in the red-teaming.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 8 16-word bullets threat modeling my life plan and what could go wrong.
|
||||
5. Provide recommendations on how to address the threats and improve the life plan.
|
||||
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Create an ASCII art diagram of the relationship between my missions, goals, and projects.
|
||||
|
||||
# OUTPUT INSTRUCTIONS
|
||||
|
||||
@@ -6,7 +6,7 @@ You are an expert at understanding deep context about a person or entity, and th
|
||||
|
||||
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
|
||||
2. Deeply study the input instruction or question.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible ouptut for the person who sent the input.
|
||||
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
|
||||
4. Write 8 16-word bullets describing what you accomplished this year.
|
||||
5. End with an ASCII art visualization of what you worked on and accomplished vs. what you didn't work on or finish.
|
||||
|
||||
|
||||
@@ -45,7 +45,7 @@ Follow the following structure:
|
||||
|
||||
- Deeply understand the relationship between the HTTP requests provided. Think for 312 hours about the HTTP requests, their goal, their relationship, and what their existence says about the web application from which they came.
|
||||
|
||||
- Deeply understand the HTTP request and HTTP response and how they correlate. Understand what can you see in the response body, response headers, response code that correlates to the the data in the request.
|
||||
- Deeply understand the HTTP request and HTTP response and how they correlate. Understand what can you see in the response body, response headers, response code that correlates to the data in the request.
|
||||
|
||||
- Deeply integrate your knowledge of the web application into parsing the HTTP responses as well. Integrate all knowledge consumed at this point together.
|
||||
|
||||
|
||||
373
docs/Automated-ChangeLog.md
Normal file
@@ -0,0 +1,373 @@
|
||||
# Automated CHANGELOG Entry System for CI/CD
|
||||
|
||||
## Overview
|
||||
|
||||
This document outlines a comprehensive system for automatically generating and maintaining CHANGELOG.md entries during the CI/CD process. The system builds upon the existing `generate_changelog` tool and integrates seamlessly with GitHub's pull request workflow.
|
||||
|
||||
## Current State Analysis
|
||||
|
||||
### Existing Infrastructure
|
||||
|
||||
The `generate_changelog` tool already provides:
|
||||
|
||||
- **High-performance Git history walking** with one-pass algorithm
|
||||
- **GitHub API integration** with GraphQL optimization and smart caching
|
||||
- **SQLite-based caching** for instant incremental updates
|
||||
- **AI-powered summaries** using Fabric integration
|
||||
- **Concurrent processing** for optimal performance
|
||||
- **Version detection** from git tags and commit patterns
|
||||
|
||||
### Key Components
|
||||
|
||||
- **Main entry point**: `cmd/generate_changelog/main.go`
|
||||
- **Core generation logic**: `internal/changelog/generator.go`
|
||||
- **AI summarization**: `internal/changelog/summarize.go`
|
||||
- **Caching system**: `internal/cache/cache.go`
|
||||
- **GitHub integration**: `internal/github/client.go`
|
||||
- **Git operations**: `internal/git/walker.go`
|
||||
|
||||
## Proposed Automated System
|
||||
|
||||
### Developer Workflow
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[Developer creates feature branch] --> B[Codes feature]
|
||||
B --> C[Creates Pull Request]
|
||||
C --> D[PR is open and ready]
|
||||
D --> E[Developer runs: generate_changelog --incoming-pr XXXX]
|
||||
E --> F[Tool validates PR is open/mergeable]
|
||||
F --> G[Tool creates incoming/XXXX.txt with AI summary]
|
||||
G --> H[Auto-commit and push to branch]
|
||||
H --> I[PR includes pre-processed changelog entry]
|
||||
I --> J[PR gets reviewed and merged]
|
||||
```
|
||||
|
||||
### CI/CD Integration
|
||||
|
||||
```mermaid
|
||||
graph TD
|
||||
A[PR merged to main] --> B[Version bump workflow triggered]
|
||||
B --> C[generate_changelog --process-prs]
|
||||
C --> D[Scan incoming/ directory]
|
||||
D --> E[Concatenate all incoming/*.txt files]
|
||||
E --> F[Insert new version at top of CHANGELOG.md]
|
||||
F --> G[Store entry in versions table]
|
||||
G --> H[git rm incoming/*.txt files]
|
||||
H --> I[git add CHANGELOG.md and changelog.db, done by the tool]
|
||||
I --> J[Increment version number]
|
||||
J --> K[Commit and tag release]
|
||||
```
|
||||
|
||||
## Implementation Details
|
||||
|
||||
### Phase 1: Pre-Processing PRs
|
||||
|
||||
#### New Command: `--incoming-pr`
|
||||
|
||||
**Usage**: `generate_changelog --incoming-pr 1672`
|
||||
|
||||
**Functionality**:
|
||||
|
||||
1. **Validation**:
|
||||
- Verify PR exists and is open
|
||||
- Check PR is mergeable
|
||||
- Ensure branch is up-to-date
|
||||
- Verify that current git repo is clean (everything committed); do not continue otherwise.
|
||||
|
||||
2. **Content Generation**:
|
||||
- Extract PR metadata (title, author, description)
|
||||
- Collect all commit messages from the PR
|
||||
- Use existing `SummarizeVersionContent` function for AI enhancement
|
||||
- Format as standard changelog entry
|
||||
|
||||
3. **File Creation**:
|
||||
- Generate `./cmd/generate_changelog/incoming/{PR#}.txt`
|
||||
- Include PR header: `### PR [#1672](url) by [author](profile): Title` (as is done currently in the code)
|
||||
- Consider extracting the existing header code for PRs into a helper function for re-use.
|
||||
- Include the AI-summarized changes (generated when we ran all the commit messages through `SummarizeVersionContent`)
|
||||
|
||||
4. **Auto-commit**:
|
||||
- Commit file with message: `chore: incoming 1672 changelog entry`
|
||||
- Optionally push to current branch (use `--push` flag)
|
||||
|
||||
(The PR is now ready to be merged, with its CHANGELOG entry already integrated.)
|
||||
|
||||
#### File Format Example
|
||||
|
||||
```markdown
|
||||
### PR [#1672](https://github.com/danielmiessler/Fabric/pull/1672) by [ksylvan](https://github.com/ksylvan): Changelog Generator Enhancement
|
||||
|
||||
- Added automated CI/CD integration for changelog generation
|
||||
- Implemented pre-processing of PR entries during development
|
||||
- Enhanced caching system for better performance
|
||||
- Added validation for mergeable PR states
|
||||
```
|
||||
|
||||
### Phase 2: Release Processing
|
||||
|
||||
#### New Command: `--process-prs`
|
||||
|
||||
**Usage**: `generate_changelog --process-prs`
|
||||
|
||||
**Integration Point**: `.github/workflows/update-version-and-create-tag.yml`
|
||||
|
||||
(we can do this AFTER the "Update gomod2nix.toml file" step in the workflow, where we
|
||||
already have generated the next version in the "version.nix" file)
|
||||
|
||||
**Functionality**:
|
||||
|
||||
1. **Discovery**: Scan `./cmd/generate_changelog/incoming/` directory
|
||||
2. **Aggregation**: Read and concatenate all `*.txt` files
|
||||
3. **Version Creation**: Generate new version header with current date
|
||||
4. **CHANGELOG Update**: Insert new version at top of existing CHANGELOG.md
|
||||
5. **Database Update**: Store complete entry in `versions` table as `ai_summary`
|
||||
6. **Cleanup**: Remove all processed incoming files
|
||||
7. **Stage Changes**: Add modified files to git staging area
|
||||
|
||||
#### Example Output in CHANGELOG.md
|
||||
|
||||
```markdown
|
||||
# Changelog
|
||||
|
||||
## v1.4.259 (2025-07-18)
|
||||
|
||||
### PR [#1672](https://github.com/danielmiessler/Fabric/pull/1672) by [ksylvan](https://github.com/ksylvan): Changelog Generator Enhancement
|
||||
|
||||
- Added automated CI/CD integration for changelog generation
|
||||
- Implemented pre-processing of PR entries during development
|
||||
- Enhanced caching system for better performance
|
||||
|
||||
### PR [#1671](https://github.com/danielmiessler/Fabric/pull/1671) by [contributor](https://github.com/contributor): Bug Fix
|
||||
|
||||
- Fixed memory leak in caching system
|
||||
- Improved error handling for GitHub API failures
|
||||
|
||||
## v1.4.258 (2025-07-14)
|
||||
[... rest of file ...]
|
||||
```
|
||||
|
||||
## Technical Implementation
|
||||
|
||||
### Configuration Extensions
|
||||
|
||||
Add to `internal/config/config.go`:
|
||||
|
||||
```go
|
||||
type Config struct {
|
||||
// ... existing fields
|
||||
IncomingPR int // PR number for --incoming-pr
|
||||
ProcessPRsVersion string // Flag for --process-prs (new version string)
|
||||
IncomingDir string // Directory for incoming files (default: ./cmd/generate_changelog/incoming/)
|
||||
}
|
||||
```
|
||||
|
||||
### New Command Line Flags
|
||||
|
||||
```go
|
||||
rootCmd.Flags().IntVar(&cfg.IncomingPR, "incoming-pr", 0, "Pre-process PR for changelog (provide PR number)")
|
||||
rootCmd.Flags().StringVar(&cfg.ProcessPRsVersion, "process-prs", "", "Process all incoming PR files for release (provide version like v1.4.262)")
|
||||
rootCmd.Flags().StringVar(&cfg.IncomingDir, "incoming-dir", "./cmd/generate_changelog/incoming", "Directory for incoming PR files")
|
||||
```
|
||||
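The usage guide later notes that `--incoming-pr` and `--process-prs` are mutually exclusive. A small guard after flag parsing can enforce that; the placement and wording below are an assumption, not the tool's actual code:

```go
// Hypothetical post-parse validation: reject invocations that set both
// --incoming-pr and --process-prs, since they drive different workflows.
if cfg.IncomingPR != 0 && cfg.ProcessPRsVersion != "" {
	return fmt.Errorf("--incoming-pr and --process-prs are mutually exclusive")
}
```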
|
||||
### Core Logic Extensions
|
||||
|
||||
#### PR Pre-processing
|
||||
|
||||
```go
|
||||
func (g *Generator) ProcessIncomingPR(prNumber int) error {
|
||||
// 1. Validate PR state via GitHub API
|
||||
pr, err := g.ghClient.GetPR(prNumber)
|
||||
if err != nil || pr.State != "open" || !pr.Mergeable {
|
||||
return fmt.Errorf("PR %d is not in valid state for processing", prNumber)
|
||||
}
|
||||
|
||||
// 2. Generate changelog content using existing logic
|
||||
content := g.formatPR(pr)
|
||||
|
||||
// 3. Apply AI summarization if enabled
|
||||
if g.cfg.EnableAISummary {
|
||||
content, _ = SummarizeVersionContent(content)
|
||||
}
|
||||
|
||||
// 4. Write to incoming file
|
||||
filename := filepath.Join(g.cfg.IncomingDir, fmt.Sprintf("%d.txt", prNumber))
|
||||
err = os.WriteFile(filename, []byte(content), 0644)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to write incoming file: %w", err)
|
||||
}
|
||||
|
||||
// 5. Auto-commit and push
|
||||
return g.commitAndPushIncoming(prNumber, filename)
|
||||
}
|
||||
```
|
||||
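The `commitAndPushIncoming` helper referenced above is not shown. A minimal sketch that shells out to git (the helper name comes from the snippet above; the implementation here, including the `g.cfg.Push` field, is an assumption) could look like this:

```go
func (g *Generator) commitAndPushIncoming(prNumber int, filename string) error {
	// Stage the generated incoming file and commit it with the agreed message.
	msg := fmt.Sprintf("chore: incoming %d changelog entry", prNumber)
	for _, args := range [][]string{
		{"add", filename},
		{"commit", "-m", msg},
	} {
		if out, err := exec.Command("git", args...).CombinedOutput(); err != nil {
			return fmt.Errorf("git %v failed: %w\n%s", args, err, out)
		}
	}

	// Only push when the user asked for it via --push.
	if !g.cfg.Push {
		return nil
	}
	if out, err := exec.Command("git", "push").CombinedOutput(); err != nil {
		return fmt.Errorf("git push failed: %w\n%s", err, out)
	}
	return nil
}
```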
|
||||
#### Release Processing
|
||||
|
||||
```go
|
||||
func (g *Generator) ProcessIncomingPRs(version string) error {
|
||||
// 1. Scan incoming directory
|
||||
files, err := filepath.Glob(filepath.Join(g.cfg.IncomingDir, "*.txt"))
|
||||
if err != nil || len(files) == 0 {
|
||||
return fmt.Errorf("no incoming PR files found")
|
||||
}
|
||||
|
||||
// 2. Read and concatenate all files
|
||||
var content strings.Builder
|
||||
for _, file := range files {
|
||||
data, err := os.ReadFile(file)
|
||||
if err == nil {
|
||||
content.WriteString(string(data))
|
||||
content.WriteString("\n")
|
||||
}
|
||||
}
|
||||
|
||||
// 3. Generate version entry
|
||||
entry := fmt.Sprintf("\n## %s (%s)\n\n%s",
|
||||
version, time.Now().Format("2006-01-02"), content.String())
|
||||
|
||||
// 4. Update CHANGELOG.md
|
||||
err = g.insertVersionAtTop(entry)
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to update CHANGELOG.md: %w", err)
|
||||
}
|
||||
|
||||
// 5. Update database
|
||||
err = g.cache.SaveVersionEntry(version, content.String())
|
||||
if err != nil {
|
||||
return fmt.Errorf("failed to save to database: %w", err)
|
||||
}
|
||||
|
||||
// 6. Cleanup incoming files
|
||||
for _, file := range files {
|
||||
os.Remove(file)
|
||||
}
|
||||
|
||||
return nil
|
||||
}
|
||||
```
|
||||
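`insertVersionAtTop` is also only referenced above. One way to implement it, assuming CHANGELOG.md always starts with a `# Changelog` heading as in the example output (this sketch is illustrative, not the actual code):

```go
func (g *Generator) insertVersionAtTop(entry string) error {
	data, err := os.ReadFile("CHANGELOG.md")
	if err != nil {
		return err
	}
	content := string(data)

	// Insert the new version block right after the top-level heading,
	// keeping everything that follows untouched.
	const heading = "# Changelog\n"
	idx := strings.Index(content, heading)
	if idx == -1 {
		return fmt.Errorf("CHANGELOG.md is missing the top-level heading")
	}
	insertAt := idx + len(heading)
	updated := content[:insertAt] + entry + "\n" + content[insertAt:]

	return os.WriteFile("CHANGELOG.md", []byte(updated), 0644)
}
```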
|
||||
## Workflow Integration
|
||||
|
||||
### GitHub Actions Modification
|
||||
|
||||
Update `.github/workflows/update-version-and-create-tag.yml`.
|
||||
|
||||
```yaml
|
||||
- name: Generate Changelog Entry
|
||||
run: |
|
||||
# Process all incoming PR entries
|
||||
./cmd/generate_changelog/generate_changelog --process-prs
|
||||
|
||||
# The tool will make the needed changes in the CHANGELOG.md,
|
||||
# and the changelog.db, and will remove the PR#.txt file(s)
|
||||
# In effect, doing the following:
|
||||
# 1. Generate the new CHANGELOG (and store the entry in the changelog.db)
|
||||
# 2. git add CHANGELOG.md
|
||||
# 3. git add ./cmd/generate_changelog/changelog.db
|
||||
# 4. git rm -rf ./cmd/generate_changelog/incoming/
|
||||
#
|
||||
```
|
||||
|
||||
### Developer Instructions
|
||||
|
||||
1. **During Development**:
|
||||
|
||||
```bash
|
||||
# After PR is ready for review (commit locally only)
|
||||
generate_changelog --incoming-pr 1672 --ai-summarize
|
||||
|
||||
# Or to automatically push to remote
|
||||
generate_changelog --incoming-pr 1672 --ai-summarize --push
|
||||
```
|
||||
|
||||
2. **Validation**:
|
||||
- Check that `incoming/1672.txt` was created
|
||||
- Verify auto-commit occurred
|
||||
- Confirm file is included in PR
|
||||
- Scan the file and make any changes you need to the auto-generated summary
|
||||
|
||||
## Benefits
|
||||
|
||||
### For Developers
|
||||
|
||||
- **Automated changelog entries** - no manual CHANGELOG.md editing
|
||||
- **AI-enhanced summaries** - professional, consistent formatting
|
||||
- **Early visibility** - changelog content visible during PR review
|
||||
- **Reduced merge conflicts** - no multiple PRs editing CHANGELOG.md
|
||||
|
||||
### For Project Maintainers
|
||||
|
||||
- **Consistent formatting** - all entries follow same structure
|
||||
- **Complete coverage** - no missed changelog entries
|
||||
- **Automated releases** - seamless integration with version bumps
|
||||
- **Historical accuracy** - each PR's contribution properly documented
|
||||
|
||||
### For CI/CD
|
||||
|
||||
- **Deterministic process** - reliable, repeatable changelog generation
|
||||
- **Performance optimized** - leverages existing caching and AI systems
|
||||
- **Error resilience** - validates PR states before processing
|
||||
- **Clean integration** - minimal changes to existing workflows
|
||||
|
||||
## Implementation Strategy
|
||||
|
||||
### Phase 1: Implement Developer Tooling
|
||||
|
||||
- [x] Add new command line flags and configuration
|
||||
- [x] Implement `--incoming-pr` functionality
|
||||
- [x] Add validation for PR states and git status
|
||||
- [x] Create auto-commit logic
|
||||
|
||||
### Phase 2: Integration (CI/CD) Readiness
|
||||
|
||||
- [x] Implement `--process-prs` functionality
|
||||
- [x] Add CHANGELOG.md insertion logic
|
||||
- [x] Update database storage for version entries
|
||||
|
||||
### Phase 3: Deployment
|
||||
|
||||
- [x] Update GitHub Actions workflow
|
||||
- [x] Create developer documentation in ./docs/ directory
|
||||
- [x] Test full end-to-end workflow (the PR that includes these modifications can be its first production test)
|
||||
|
||||
### Phase 4: Adoption
|
||||
|
||||
- [ ] Train development team - Consider creating a full tutorial blog post/page to fully walk developers through the process.
|
||||
- [ ] Monitor first few releases
|
||||
- [ ] Gather feedback and iterate
|
||||
- [ ] Document lessons learned
|
||||
|
||||
## Error Handling
|
||||
|
||||
### PR Validation Failures
|
||||
|
||||
- **Closed/Merged PR**: Error with suggestion to check PR status
|
||||
- **Non-mergeable PR**: Error with instruction to resolve conflicts
|
||||
- **Missing PR**: Error with verification of PR number
|
||||
|
||||
### File System Issues
|
||||
|
||||
- **Permission errors**: Clear error with directory permission requirements
|
||||
- **Disk space**: Graceful handling with cleanup suggestions
|
||||
- **Network failures**: Retry logic with exponential backoff (a minimal sketch follows this list)
|
||||
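A minimal sketch of such a retry wrapper, assuming a simple doubling delay (names and limits here are illustrative, not the tool's actual code):

```go
// withRetry runs fn up to attempts times, doubling the wait between tries.
func withRetry(attempts int, fn func() error) error {
	delay := time.Second
	var err error
	for i := 0; i < attempts; i++ {
		if err = fn(); err == nil {
			return nil
		}
		time.Sleep(delay)
		delay *= 2 // exponential backoff
	}
	return fmt.Errorf("giving up after %d attempts: %w", attempts, err)
}
```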
|
||||
### Git Operations
|
||||
|
||||
- **Commit failures**: Check for dirty working directory
|
||||
- **Push failures**: Handle authentication and remote issues
|
||||
- **Merge conflicts**: Clear instructions for manual resolution
|
||||
|
||||
## Future Enhancements
|
||||
|
||||
### Advanced Features
|
||||
|
||||
- **Custom categorization** - group changes by type (feat/fix/docs)
|
||||
- **Breaking change detection** - special handling for BREAKING CHANGE commits
|
||||
- **Release notes generation** - enhanced formatting for GitHub releases (our release pages are pretty bare)
|
||||
|
||||
## Conclusion
|
||||
|
||||
This automated changelog system builds upon the robust foundation of the existing `generate_changelog` tool while providing a seamless developer experience and reliable CI/CD integration. By pre-processing PR entries during development and aggregating them during releases, we achieve both accuracy and automation without sacrificing quality or developer productivity.
|
||||
|
||||
The phased approach ensures smooth adoption while the extensive error handling and validation provide confidence in production deployment. The system's design leverages existing infrastructure and patterns, making it a natural evolution of the current changelog generation capabilities.
|
||||
195
docs/Automated-Changelog-Usage.md
Normal file
@@ -0,0 +1,195 @@
|
||||
# Automated Changelog System - Developer Guide
|
||||
|
||||
This guide explains how to use the new automated changelog system for the Fabric project.
|
||||
|
||||
## Overview
|
||||
|
||||
The automated changelog system allows developers to pre-process their PR changelog entries during development, which are then automatically aggregated during the release process. This eliminates manual CHANGELOG.md editing and reduces merge conflicts.
|
||||
|
||||
## Developer Workflow
|
||||
|
||||
### Step 1: Create Your Feature Branch and PR
|
||||
|
||||
Work on your feature as usual and create a pull request.
|
||||
|
||||
### Step 2: Generate Changelog Entry
|
||||
|
||||
Once your PR is ready for review, generate a changelog entry:
|
||||
|
||||
```bash
|
||||
cd cmd/generate_changelog
|
||||
go build -o generate_changelog .
|
||||
./generate_changelog --incoming-pr YOUR_PR_NUMBER
|
||||
```
|
||||
|
||||
For example, if your PR number is 1672:
|
||||
|
||||
```bash
|
||||
./generate_changelog --incoming-pr 1672
|
||||
```
|
||||
|
||||
### Step 3: Validation
|
||||
|
||||
The tool will validate:
|
||||
|
||||
- ✅ PR exists and is open
|
||||
- ✅ PR is mergeable (no conflicts)
|
||||
- ✅ Your working directory is clean
|
||||
|
||||
If any validation fails, fix the issues and try again.
|
||||
|
||||
### Step 4: Review Generated Entry
|
||||
|
||||
The tool will:
|
||||
|
||||
1. Create `./cmd/generate_changelog/incoming/1672.txt`
|
||||
2. Generate an AI-enhanced summary (if `--ai-summarize` is enabled)
|
||||
3. Auto-commit the file to your branch (use `--push` to also push to remote)
|
||||
|
||||
Review the generated file and edit if needed:
|
||||
|
||||
```bash
|
||||
cat ./cmd/generate_changelog/incoming/1672.txt
|
||||
```
|
||||
|
||||
### Step 5: Include in PR
|
||||
|
||||
The incoming changelog entry is now part of your PR and will be reviewed along with your code changes.
|
||||
|
||||
## Example Generated Entry
|
||||
|
||||
```markdown
|
||||
### PR [#1672](https://github.com/danielmiessler/fabric/pull/1672) by [ksylvan](https://github.com/ksylvan): Changelog Generator Enhancement
|
||||
|
||||
- Added automated CI/CD integration for changelog generation
|
||||
- Implemented pre-processing of PR entries during development
|
||||
- Enhanced caching system for better performance
|
||||
- Added validation for mergeable PR states
|
||||
```
|
||||
|
||||
## Command Options
|
||||
|
||||
### `--incoming-pr`
|
||||
|
||||
Pre-process a specific PR for changelog generation.
|
||||
|
||||
**Usage**: `./generate_changelog --incoming-pr PR_NUMBER`
|
||||
|
||||
**Requirements**:
|
||||
|
||||
- PR must be open
|
||||
- PR must be mergeable (no conflicts)
|
||||
- Working directory must be clean (no uncommitted changes)
|
||||
- GitHub token must be available (`GITHUB_TOKEN` env var or `--token` flag)
|
||||
|
||||
**Mutual Exclusivity**: Cannot be used with `--process-prs` flag
|
||||
|
||||
### `--incoming-dir`
|
||||
|
||||
Specify custom directory for incoming PR files (default: `./cmd/generate_changelog/incoming`).
|
||||
|
||||
**Usage**: `./generate_changelog --incoming-pr 1672 --incoming-dir ./custom/path`
|
||||
|
||||
### `--process-prs`
|
||||
|
||||
Process all incoming PR files for release aggregation. Used by CI/CD during release creation.
|
||||
|
||||
**Usage**: `./generate_changelog --process-prs {new_version_string}`
|
||||
|
||||
**Mutual Exclusivity**: Cannot be used with `--incoming-pr` flag
|
||||
|
||||
### `--ai-summarize`
|
||||
|
||||
Enable AI-enhanced summaries using Fabric integration.
|
||||
|
||||
**Usage**: `./generate_changelog --incoming-pr 1672 --ai-summarize`
|
||||
|
||||
### `--push`
|
||||
|
||||
Enable automatic git push after creating an incoming entry. By default, the commit is created locally but not pushed to the remote repository.
|
||||
|
||||
**Usage**: `./generate_changelog --incoming-pr 1672 --push`
|
||||
|
||||
**Note**: When using `--push`, ensure you have proper authentication configured (SSH keys or GITHUB_TOKEN environment variable).
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### "PR is not open"
|
||||
|
||||
Your PR has been closed or merged. Only open PRs can be processed.
|
||||
|
||||
### "PR is not mergeable"
|
||||
|
||||
Your PR has merge conflicts or other issues preventing it from being merged. Resolve conflicts and ensure the PR is in a mergeable state.
|
||||
|
||||
### "Working directory is not clean"
|
||||
|
||||
You have uncommitted changes. Commit or stash them before running the tool.
|
||||
|
||||
### "Failed to fetch PR"
|
||||
|
||||
Check your GitHub token and network connection. Ensure the PR number exists.
|
||||
|
||||
## CI/CD Integration
|
||||
|
||||
The system automatically processes all incoming PR files during the release workflow. No manual intervention is required.
|
||||
|
||||
When a release is created:
|
||||
|
||||
1. All `incoming/*.txt` files are aggregated using `--process-prs`
|
||||
2. Version is detected from `version.nix` or latest git tag
|
||||
3. A new version entry is created in CHANGELOG.md
|
||||
4. Incoming files are cleaned up (removed)
|
||||
5. Changes are staged for the release commit (CHANGELOG.md and cache file)
|
||||
|
||||
## Best Practices
|
||||
|
||||
1. **Run early**: Generate your changelog entry as soon as your PR is ready for review
|
||||
2. **Review content**: Always review the generated entry and edit if necessary
|
||||
3. **Keep it updated**: If you make significant changes to your PR, regenerate the entry
|
||||
4. **Use AI summaries**: Enable `--ai-summarize` for more professional, consistent formatting
|
||||
|
||||
## Advanced Usage
|
||||
|
||||
### Custom GitHub Token
|
||||
|
||||
```bash
|
||||
./generate_changelog --incoming-pr 1672 --token YOUR_GITHUB_TOKEN
|
||||
```
|
||||
|
||||
### Custom Repository Path
|
||||
|
||||
```bash
|
||||
./generate_changelog --incoming-pr 1672 --repo /path/to/repo
|
||||
```
|
||||
|
||||
### Disable Caching
|
||||
|
||||
```bash
|
||||
./generate_changelog --incoming-pr 1672 --no-cache
|
||||
```
|
||||
|
||||
### Enable Auto-Push
|
||||
|
||||
```bash
|
||||
./generate_changelog --incoming-pr 1672 --push
|
||||
```
|
||||
|
||||
This creates the commit locally and pushes it to the remote repository. By default, commits are only created locally, allowing you to review changes before pushing manually.
|
||||
|
||||
**Authentication**: The tool automatically detects GitHub repositories and uses the GITHUB_TOKEN environment variable for authentication when pushing. For SSH repositories, ensure your SSH keys are properly configured.
|
||||
|
||||
## Integration with Existing Workflow
|
||||
|
||||
This system is fully backward compatible. The existing changelog generation continues to work unchanged. The new features are opt-in and only activated when using the new flags.
|
||||
|
||||
## Support
|
||||
|
||||
If you encounter issues:
|
||||
|
||||
1. Check this documentation
|
||||
2. Verify your GitHub token has appropriate permissions
|
||||
3. Ensure your PR meets the validation requirements
|
||||
4. Check the tool's help: `./generate_changelog --help`
|
||||
|
||||
For bugs or feature requests, please create an issue in the repository.
|
||||
183
docs/Desktop-Notifications.md
Normal file
@@ -0,0 +1,183 @@
|
||||
# Desktop Notifications
|
||||
|
||||
Fabric supports desktop notifications to alert you when commands complete, which is especially useful for long-running tasks or when you're multitasking.
|
||||
|
||||
## Quick Start
|
||||
|
||||
Enable notifications with the `--notification` flag:
|
||||
|
||||
```bash
|
||||
fabric --pattern summarize --notification < article.txt
|
||||
```
|
||||
|
||||
## Configuration
|
||||
|
||||
### Command Line Options
|
||||
|
||||
- `--notification`: Enable desktop notifications when command completes
|
||||
- `--notification-command`: Use a custom notification command instead of built-in notifications
|
||||
|
||||
### YAML Configuration
|
||||
|
||||
Add notification settings to your `~/.config/fabric/config.yaml`:
|
||||
|
||||
```yaml
|
||||
# Enable notifications by default
|
||||
notification: true
|
||||
|
||||
# Optional: Custom notification command
|
||||
notificationCommand: 'notify-send --urgency=normal "$1" "$2"'
|
||||
```
|
||||
|
||||
## Platform Support
|
||||
|
||||
### macOS
|
||||
|
||||
- **Default**: Uses `osascript` (built into macOS)
|
||||
- **Enhanced**: Install `terminal-notifier` for better notifications:
|
||||
|
||||
```bash
|
||||
brew install terminal-notifier
|
||||
```
|
||||
|
||||
### Linux
|
||||
|
||||
- **Requirement**: Install `notify-send`:
|
||||
|
||||
```bash
|
||||
# Ubuntu/Debian
|
||||
sudo apt install libnotify-bin
|
||||
|
||||
# Fedora
|
||||
sudo dnf install libnotify
|
||||
```
|
||||
|
||||
### Windows
|
||||
|
||||
- **Default**: Uses PowerShell message boxes (built-in)
|
||||
|
||||
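Internally, the built-in notifier described above comes down to a per-platform dispatch. The sketch below is an assumption about how such a dispatch might look, not Fabric's actual implementation:

```go
// Hypothetical built-in notifier: pick a platform tool and hand it the
// title and message as separate arguments.
func notify(title, message string) error {
	switch runtime.GOOS {
	case "darwin":
		script := fmt.Sprintf("display notification %q with title %q", message, title)
		return exec.Command("osascript", "-e", script).Run()
	case "linux":
		return exec.Command("notify-send", title, message).Run()
	case "windows":
		// PowerShell message box, as mentioned above.
		ps := fmt.Sprintf("[System.Windows.Forms.MessageBox]::Show(%q, %q)", message, title)
		return exec.Command("powershell", "-NoProfile", "-Command",
			"Add-Type -AssemblyName System.Windows.Forms; "+ps).Run()
	default:
		return fmt.Errorf("no notification backend for %s", runtime.GOOS)
	}
}
```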
## Custom Notification Commands
|
||||
|
||||
The `--notification-command` flag allows you to use custom notification scripts or commands. The command receives the title as `$1` and the message as `$2`, passed as shell positional arguments.
|
||||
|
||||
**Security Note**: The title and message content are properly escaped to prevent command injection attacks from AI-generated output containing shell metacharacters.
|
||||
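One injection-safe way to achieve this is to pass the title and message as positional parameters instead of splicing them into the command string. The snippet below is a sketch of that pattern under the `$1`/`$2` convention described above, not necessarily the exact code Fabric uses:

```go
// userCommand is the raw --notification-command value, e.g.
// `notify-send --urgency=normal "$1" "$2"`.
// Because title and message are passed as argv entries ($1 and $2 of the
// spawned shell), shell metacharacters in the AI output are never parsed
// as commands.
cmd := exec.Command("sh", "-c", userCommand, "fabric-notify", title, message)
cmd.Stdout, cmd.Stderr = os.Stdout, os.Stderr
err := cmd.Run()
```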
|
||||
### Examples
|
||||
|
||||
**macOS with custom sound:**
|
||||
|
||||
```bash
|
||||
fabric --pattern analyze_claims --notification-command 'osascript -e "display notification \"$2\" with title \"$1\" sound name \"Ping\""' < document.txt
|
||||
```
|
||||
|
||||
**Linux with urgency levels:**
|
||||
|
||||
```bash
|
||||
fabric --pattern extract_wisdom --notification-command 'notify-send --urgency=critical "$1" "$2"' < video-transcript.txt
|
||||
```
|
||||
|
||||
**Custom script:**
|
||||
|
||||
```bash
|
||||
fabric --pattern summarize --notification-command '/path/to/my-notification-script.sh "$1" "$2"' < report.pdf
|
||||
```
|
||||
|
||||
**Testing your custom command:**
|
||||
|
||||
```bash
|
||||
# Test that $1 and $2 are passed correctly
|
||||
fabric --pattern raw_query --notification-command 'echo "Title: $1, Message: $2"' "test input"
|
||||
```
|
||||
|
||||
## Notification Content
|
||||
|
||||
Notifications include:
|
||||
|
||||
- **Title**: "Fabric Command Complete" or "Fabric: [pattern] Complete"
|
||||
- **Message**: Brief summary of the output (first 100 characters)
|
||||
|
||||
For long outputs, the message is truncated with "..." to fit notification display limits.
|
||||
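The truncation itself is straightforward; a sketch of the kind of helper involved (the exact limit handling is an assumption):

```go
// truncate shortens a notification message to at most max characters,
// appending "..." when something was cut off.
func truncate(s string, max int) string {
	runes := []rune(s)
	if len(runes) <= max {
		return s
	}
	return string(runes[:max]) + "..."
}
```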
|
||||
## Use Cases
|
||||
|
||||
### Long-Running Tasks
|
||||
|
||||
```bash
|
||||
# Process large document with notifications
|
||||
fabric --pattern analyze_paper --notification < research-paper.pdf
|
||||
|
||||
# Extract wisdom from long video with alerts
|
||||
fabric -y "https://youtube.com/watch?v=..." --pattern extract_wisdom --notification
|
||||
```
|
||||
|
||||
### Background Processing
|
||||
|
||||
```bash
|
||||
# Process multiple files and get notified when each completes
|
||||
for file in *.txt; do
|
||||
fabric --pattern summarize --notification < "$file" &
|
||||
done
|
||||
```
|
||||
|
||||
### Integration with Other Tools
|
||||
|
||||
```bash
|
||||
# Combine with other commands
|
||||
curl -s "https://api.example.com/data" | \
|
||||
fabric --pattern analyze_data --notification --output results.md
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### No Notifications Appearing
|
||||
|
||||
1. **Check system notifications are enabled** for Terminal/your shell
|
||||
2. **Verify notification tools are installed**:
|
||||
- macOS: `which osascript` (should exist)
|
||||
- Linux: `which notify-send`
|
||||
- Windows: `where.exe powershell`
|
||||
|
||||
3. **Test with simple command**:
|
||||
|
||||
```bash
|
||||
echo "test" | fabric --pattern raw_query --notification --dry-run
|
||||
```
|
||||
|
||||
### Notification Permission Issues
|
||||
|
||||
On some systems, you may need to grant notification permissions to your terminal application:
|
||||
|
||||
- **macOS**: System Preferences → Security & Privacy → Privacy → Notifications → Enable for Terminal
|
||||
- **Linux**: Depends on desktop environment; usually automatic
|
||||
- **Windows**: Usually works by default
|
||||
|
||||
### Custom Commands Not Working
|
||||
|
||||
- Ensure your custom notification command is executable
|
||||
- Test the command manually with sample arguments
|
||||
- Check that all required dependencies are installed
|
||||
|
||||
## Advanced Configuration
|
||||
|
||||
### Environment-Specific Settings
|
||||
|
||||
Create different configuration files for different environments:
|
||||
|
||||
```bash
|
||||
# Work computer (quieter notifications)
|
||||
fabric --config ~/.config/fabric/work-config.yaml --notification
|
||||
|
||||
# Personal computer (with sound)
|
||||
fabric --config ~/.config/fabric/personal-config.yaml --notification
|
||||
```
|
||||
|
||||
### Integration with Task Management
|
||||
|
||||
```bash
|
||||
# Custom script that also logs to task management system
|
||||
notificationCommand: '/usr/local/bin/fabric-notify-and-log.sh "$1" "$2"'
|
||||
```
|
||||
|
||||
## Examples
|
||||
|
||||
See `docs/notification-config.yaml` for a complete configuration example with various notification command options.
|
||||
155
docs/Gemini-TTS.md
Normal file
@@ -0,0 +1,155 @@
|
||||
# Gemini Text-to-Speech (TTS) Guide
|
||||
|
||||
Fabric supports Google Gemini's text-to-speech (TTS) capabilities, allowing you to convert text into high-quality audio using various AI-generated voices.
|
||||
|
||||
## Overview
|
||||
|
||||
The Gemini TTS feature in Fabric allows you to:
|
||||
|
||||
- Convert text input into audio using Google's Gemini TTS models
|
||||
- Choose from 30+ different AI voices with varying characteristics
|
||||
- Generate high-quality WAV audio files
|
||||
- Integrate TTS generation into your existing Fabric workflows
|
||||
|
||||
## Usage
|
||||
|
||||
### Basic TTS Generation
|
||||
|
||||
To generate audio from text using TTS:
|
||||
|
||||
```bash
|
||||
# Basic TTS with default voice (Kore)
|
||||
echo "Hello, this is a test of Gemini TTS" | fabric -m gemini-2.5-flash-preview-tts -o output.wav
|
||||
|
||||
# Using a specific voice
|
||||
echo "Hello, this is a test with the Charon voice" | fabric -m gemini-2.5-flash-preview-tts --voice Charon -o output.wav
|
||||
|
||||
# Using TTS with a pattern
|
||||
fabric -p summarize --voice Puck -m gemini-2.5-flash-preview-tts -o summary.wav < document.txt
|
||||
```
|
||||
|
||||
### Voice Selection
|
||||
|
||||
Use the `--voice` flag to specify which voice to use for TTS generation:
|
||||
|
||||
```bash
|
||||
fabric -m gemini-2.5-flash-preview-tts --voice Zephyr -o output.wav "Your text here"
|
||||
```
|
||||
|
||||
If no voice is specified, the default voice "Kore" will be used.
|
||||
|
||||
## Available Voices
|
||||
|
||||
Gemini TTS supports 30+ different voices, each with unique characteristics:
|
||||
|
||||
### Popular Voices
|
||||
|
||||
- **Kore** - Firm and confident (default)
|
||||
- **Charon** - Informative and clear
|
||||
- **Puck** - Upbeat and energetic
|
||||
- **Zephyr** - Bright and cheerful
|
||||
- **Leda** - Youthful and energetic
|
||||
- **Aoede** - Breezy and natural
|
||||
|
||||
### Complete Voice List
|
||||
|
||||
- Kore, Charon, Puck, Fenrir, Aoede, Leda, Orus, Zephyr
|
||||
- Autonoe, Callirhoe, Despina, Erinome, Gacrux, Laomedeia
|
||||
- Pulcherrima, Sulafat, Vindemiatrix, Achernar, Achird
|
||||
- Algenib, Algieba, Alnilam, Enceladus, Iapetus, Rasalgethi
|
||||
- Sadachbia, Zubenelgenubi, Vega, Capella, Lyra
|
||||
|
||||
### Listing Available Voices
|
||||
|
||||
To see all available voices with descriptions:
|
||||
|
||||
```bash
|
||||
# List all voices with characteristics
|
||||
fabric --list-gemini-voices
|
||||
|
||||
# List voice names only (for shell completion)
|
||||
fabric --list-gemini-voices --shell-complete-list
|
||||
```
|
||||
|
||||
## Rate Limits
|
||||
|
||||
Google Gemini TTS has usage quotas that vary by plan:
|
||||
|
||||
### Free Tier
|
||||
|
||||
- **15 requests per day** per project per TTS model
|
||||
- Quota resets daily
|
||||
- Applies to all TTS models (e.g., `gemini-2.5-flash-preview-tts`)
|
||||
|
||||
### Rate Limit Errors
|
||||
|
||||
If you exceed your quota, you'll see an error like:
|
||||
|
||||
```text
|
||||
Error 429: You exceeded your current quota, please check your plan and billing details
|
||||
```
|
||||
|
||||
**Solutions:**
|
||||
|
||||
- Wait for daily quota reset (typically at midnight UTC)
|
||||
- Upgrade to a paid plan for higher limits
|
||||
- Use TTS generation strategically for important content
|
||||
|
||||
For current rate limits and pricing, visit: <https://ai.google.dev/gemini-api/docs/rate-limits>
|
||||
|
||||
## Configuration
|
||||
|
||||
### Command Line Options
|
||||
|
||||
- `--voice <voice_name>` - Specify the TTS voice to use
|
||||
- `-o <filename.wav>` - Output audio file (required for TTS models)
|
||||
- `-m <tts_model>` - Specify a TTS-capable model (e.g., `gemini-2.5-flash-preview-tts`)
|
||||
|
||||
### YAML Configuration
|
||||
|
||||
You can also set a default voice in your Fabric configuration file (`~/.config/fabric/config.yaml`):
|
||||
|
||||
```yaml
|
||||
voice: "Charon" # Set your preferred default voice
|
||||
```
|
||||
|
||||
## Requirements
|
||||
|
||||
- Valid Google Gemini API key configured in Fabric
|
||||
- TTS-capable Gemini model (models containing "tts" in the name)
|
||||
- Audio output must be specified with `-o filename.wav`
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
#### Error: "TTS model requires audio output"
|
||||
|
||||
- Solution: Always specify an output file with `-o filename.wav` when using TTS models
|
||||
|
||||
#### Error: "Invalid voice 'X'"
|
||||
|
||||
- Solution: Check that the voice name is spelled correctly and matches one of the supported voices listed above
|
||||
|
||||
#### Error: "TTS generation failed"
|
||||
|
||||
- Solution: Verify your Gemini API key is valid and you have sufficient quota
|
||||
|
||||
### Getting Help
|
||||
|
||||
For additional help with TTS features:
|
||||
|
||||
```bash
|
||||
fabric --help
|
||||
```
|
||||
|
||||
## Technical Details
|
||||
|
||||
- **Audio Format**: WAV files with 24kHz sample rate, 16-bit depth, mono channel (a header-writing sketch follows this list)
|
||||
- **Language Support**: Automatic language detection for 24+ languages
|
||||
- **Model Requirements**: Models must contain "tts", "preview-tts", or "text-to-speech" in the name
|
||||
- **Voice Selection**: Uses Google's PrebuiltVoiceConfig system for consistent voice quality
|
||||
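For reference, this is what wrapping raw PCM samples in that container looks like. The sketch below is illustrative only and is not Fabric's actual WAV writer:

```go
// writeWAV wraps raw 16-bit little-endian PCM samples (24 kHz, mono) in a
// canonical 44-byte WAV header.
func writeWAV(w io.Writer, pcm []byte) error {
	const (
		sampleRate    = 24000
		bitsPerSample = 16
		channels      = 1
	)
	byteRate := sampleRate * channels * bitsPerSample / 8
	blockAlign := channels * bitsPerSample / 8

	// RIFF/WAVE header, little-endian throughout.
	w.Write([]byte("RIFF"))
	binary.Write(w, binary.LittleEndian, uint32(36+len(pcm)))
	w.Write([]byte("WAVEfmt "))
	binary.Write(w, binary.LittleEndian, uint32(16))        // fmt chunk size
	binary.Write(w, binary.LittleEndian, uint16(1))         // audio format: PCM
	binary.Write(w, binary.LittleEndian, uint16(channels))
	binary.Write(w, binary.LittleEndian, uint32(sampleRate))
	binary.Write(w, binary.LittleEndian, uint32(byteRate))
	binary.Write(w, binary.LittleEndian, uint16(blockAlign))
	binary.Write(w, binary.LittleEndian, uint16(bitsPerSample))
	w.Write([]byte("data"))
	binary.Write(w, binary.LittleEndian, uint32(len(pcm)))

	_, err := w.Write(pcm)
	return err
}
```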
|
||||
---
|
||||
|
||||
For more information about Fabric, visit the [main documentation](../README.md).
|
||||
140
docs/Shell-Completions.md
Normal file
@@ -0,0 +1,140 @@
|
||||
# Shell Completions for Fabric
|
||||
|
||||
Fabric comes with shell completion support for Zsh, Bash, and Fish shells. These completions provide intelligent tab-completion for commands, flags, patterns, models, contexts, and more.
|
||||
|
||||
## Quick Setup (Automated)
|
||||
|
||||
You can install completions without cloning the repo:
|
||||
|
||||
```bash
|
||||
# No-clone install (Zsh/Bash/Fish supported)
|
||||
curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh | sh
|
||||
|
||||
# Optional: dry-run first
|
||||
curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh | sh -s -- --dry-run
|
||||
|
||||
# Optional: override the download source
|
||||
FABRIC_COMPLETIONS_BASE_URL="https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions" \
|
||||
sh -c "$(curl -fsSL https://raw.githubusercontent.com/danielmiessler/Fabric/refs/heads/main/completions/setup-completions.sh)"
|
||||
```
|
||||
|
||||
Or, if you have the repository locally:
|
||||
|
||||
```bash
|
||||
# Run the automated setup script from a cloned repo
|
||||
./completions/setup-completions.sh
|
||||
|
||||
# Or see what it would do first
|
||||
./completions/setup-completions.sh --dry-run
|
||||
```
|
||||
|
||||
The script will:
|
||||
|
||||
- Detect whether you have `fabric` or `fabric-ai` installed
|
||||
- Detect your current shell (zsh, bash, or fish)
|
||||
- Use your existing `$fpath` directories (for zsh) or standard completion directories
|
||||
- Install the completion file with the correct name
|
||||
- Provide instructions for enabling the completions
|
||||
|
||||
If the completion files aren't present locally (e.g., when running via `curl`), the script will automatically download them from GitHub.
|
||||
|
||||
For manual installation or troubleshooting, see the detailed instructions below.
|
||||
|
||||
## Manual Installation
|
||||
|
||||
### Zsh
|
||||
|
||||
1. Copy the completion file to a directory in your `$fpath`:
|
||||
|
||||
```bash
|
||||
sudo cp completions/_fabric /usr/local/share/zsh/site-functions/
|
||||
```
|
||||
|
||||
2. **Important**: If you installed fabric as `fabric-ai`, create a symlink so completions work:
|
||||
|
||||
```bash
|
||||
sudo ln -s /usr/local/share/zsh/site-functions/_fabric /usr/local/share/zsh/site-functions/_fabric-ai
|
||||
```
|
||||
|
||||
3. Restart your shell or reload completions:
|
||||
|
||||
```bash
|
||||
autoload -U compinit && compinit
|
||||
```
|
||||
|
||||
### Bash
|
||||
|
||||
1. Copy the completion file to a standard completion directory:
|
||||
|
||||
```bash
|
||||
# System-wide installation
|
||||
sudo cp completions/fabric.bash /etc/bash_completion.d/
|
||||
|
||||
# Or user-specific installation
|
||||
mkdir -p ~/.local/share/bash-completion/completions/
|
||||
cp completions/fabric.bash ~/.local/share/bash-completion/completions/fabric
|
||||
```
|
||||
|
||||
2. **Important**: If you installed fabric as `fabric-ai`, create a symlink:
|
||||
|
||||
```bash
|
||||
# For system-wide installation
|
||||
sudo ln -s /etc/bash_completion.d/fabric.bash /etc/bash_completion.d/fabric-ai.bash
|
||||
|
||||
# Or for user-specific installation
|
||||
ln -s ~/.local/share/bash-completion/completions/fabric ~/.local/share/bash-completion/completions/fabric-ai
|
||||
```
|
||||
|
||||
3. Restart your shell or source the completion:
|
||||
|
||||
```bash
|
||||
source ~/.bashrc
|
||||
```
|
||||
|
||||
### Fish
|
||||
|
||||
1. Copy the completion file to Fish's completion directory:
|
||||
|
||||
```bash
|
||||
mkdir -p ~/.config/fish/completions
|
||||
cp completions/fabric.fish ~/.config/fish/completions/
|
||||
```
|
||||
|
||||
2. **Important**: If you installed fabric as `fabric-ai`, create a symlink:
|
||||
|
||||
```bash
|
||||
ln -s ~/.config/fish/completions/fabric.fish ~/.config/fish/completions/fabric-ai.fish
|
||||
```
|
||||
|
||||
3. Fish will automatically load the completions (no restart needed).
|
||||
|
||||
## Features
|
||||
|
||||
The completions provide intelligent suggestions for:
|
||||
|
||||
- **Patterns**: Tab-complete available patterns with `-p` or `--pattern`
|
||||
- **Models**: Tab-complete available models with `-m` or `--model`
|
||||
- **Contexts**: Tab-complete contexts for context-related flags
|
||||
- **Sessions**: Tab-complete sessions for session-related flags
|
||||
- **Strategies**: Tab-complete available strategies
|
||||
- **Extensions**: Tab-complete registered extensions
|
||||
- **Gemini Voices**: Tab-complete TTS voices for `--voice`
|
||||
- **File paths**: Smart file completion for attachment, output, and config options
|
||||
- **Flag completion**: All available command-line flags and options
|
||||
|
||||
## Alternative Installation Method
|
||||
|
||||
You can also source the completion files directly in your shell's configuration file:
|
||||
|
||||
- **Zsh**: Add to `~/.zshrc`: `source /path/to/fabric/completions/_fabric`
|
||||
- **Bash**: Add to `~/.bashrc`: `source /path/to/fabric/completions/fabric.bash`
|
||||
- **Fish**: The file-based installation method above is preferred for Fish
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
- If completions don't work, ensure the completion files have proper permissions
|
||||
- For Zsh, verify that the completion directory is in your `$fpath`
|
||||
- If you renamed the fabric binary, make sure to create the appropriate symlinks as described above
|
||||
- Restart your shell after installation to ensure completions are loaded
|
||||
|
||||
The completion system dynamically queries the fabric command for current patterns, models, and other resources, so your completions will always be up-to-date with your fabric installation.
|
||||
298
docs/YouTube-Processing.md
Normal file
@@ -0,0 +1,298 @@
|
||||
# YouTube Processing with Fabric
|
||||
|
||||
Fabric provides powerful YouTube video processing capabilities that allow you to extract transcripts, comments, and metadata from YouTube videos and playlists. This guide covers all the available options and common use cases.
|
||||
|
||||
## Prerequisites
|
||||
|
||||
- **yt-dlp**: Required for transcript extraction. Install on macOS with:
|
||||
|
||||
```bash
|
||||
brew install yt-dlp
|
||||
```
|
||||
|
||||
Or use the package manager of your choice for your operating system.
|
||||
|
||||
See the [yt-dlp wiki page](https://github.com/yt-dlp/yt-dlp/wiki/Installation) for your specific installation instructions.
|
||||
|
||||
- **YouTube API Key** (optional): Only needed for comments and metadata extraction. Configure with:
|
||||
|
||||
```bash
|
||||
fabric --setup
|
||||
```
|
||||
|
||||
## Basic Usage
|
||||
|
||||
### Extract Transcript
|
||||
|
||||
Extract a video transcript and process it with a pattern:
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern summarize
|
||||
```
|
||||
|
||||
### Extract Transcript with Timestamps
|
||||
|
||||
Get transcript with timestamps preserved:
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --transcript-with-timestamps --pattern extract_wisdom
|
||||
```
|
||||
|
||||
### Extract Comments
|
||||
|
||||
Get video comments (requires YouTube API key):
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --comments --pattern analyze_claims
|
||||
```
|
||||
|
||||
### Extract Metadata
|
||||
|
||||
Get video metadata as JSON:
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --metadata
|
||||
```
|
||||
|
||||
## Advanced Options
|
||||
|
||||
### Custom yt-dlp Arguments
|
||||
|
||||
Pass additional arguments to yt-dlp for advanced functionality. **User-provided arguments take precedence** over built-in fabric arguments, giving you full control:
|
||||
|
||||
```bash
|
||||
# Use browser cookies for age-restricted or private videos
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--cookies-from-browser brave"
|
||||
|
||||
# Override language selection (takes precedence over -g flag)
|
||||
fabric -g en -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--sub-langs es,fr"
|
||||
|
||||
# Use specific format
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--format best"
|
||||
|
||||
# Handle rate limiting (slow down requests)
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--sleep-requests 1"
|
||||
|
||||
# Multiple arguments (use quotes)
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--cookies-from-browser firefox --write-info-json"
|
||||
|
||||
# Combine rate limiting with authentication
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--cookies-from-browser brave --sleep-requests 1"
|
||||
|
||||
# Override subtitle format (takes precedence over built-in --sub-format vtt)
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --yt-dlp-args="--sub-format srt"
|
||||
```
|
||||
|
||||
#### Argument Precedence
|
||||
|
||||
Fabric constructs the yt-dlp command in this order:
|
||||
|
||||
1. **Built-in base arguments** (`--write-auto-subs`, `--skip-download`, etc.)
|
||||
2. **Language selection** (from `-g` flag): `--sub-langs LANGUAGE`
|
||||
3. **User arguments** (from `--yt-dlp-args`): **These override any conflicting built-in arguments**
|
||||
4. **Video URL**
|
||||
|
||||
This means you can override any built-in behavior by specifying it in `--yt-dlp-args`.
|
||||
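In code, that precedence is simply a matter of append order: user arguments are added after the built-ins, so when yt-dlp sees the same option twice, the user's value wins. A rough sketch, assuming the `go-shellquote` dependency from `go.mod` is used to split the raw `--yt-dlp-args` string (the built-in argument list is abbreviated here and the variable names are illustrative):

```go
// Assemble the yt-dlp invocation: built-ins first, then the -g language,
// then user args (which therefore override), then the URL.
base := []string{"--write-auto-subs", "--skip-download", "--sub-format", "vtt"}
if language != "" {
	base = append(base, "--sub-langs", language)
}
userArgs, err := shellquote.Split(ytDlpArgs) // e.g. `--cookies-from-browser brave`
if err != nil {
	return fmt.Errorf("invalid --yt-dlp-args: %w", err)
}
args := append(base, userArgs...)
args = append(args, videoURL)
cmd := exec.Command("yt-dlp", args...)
```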
|
||||
### Playlist Processing
|
||||
|
||||
Process entire playlists:
|
||||
|
||||
```bash
|
||||
# Process all videos in a playlist
|
||||
fabric -y "https://www.youtube.com/playlist?list=PLAYLIST_ID" --playlist --pattern summarize
|
||||
|
||||
# Save playlist videos to CSV
|
||||
fabric -y "https://www.youtube.com/playlist?list=PLAYLIST_ID" --playlist -o playlist.csv
|
||||
```
|
||||
|
||||
### Language Support
|
||||
|
||||
Specify transcript language:
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" -g es --pattern translate
|
||||
```
|
||||
|
||||
## Combining Options
|
||||
|
||||
You can combine multiple YouTube processing options:
|
||||
|
||||
```bash
|
||||
# Get transcript, comments, and metadata
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" \
|
||||
--transcript \
|
||||
--comments \
|
||||
--metadata \
|
||||
--pattern comprehensive_analysis
|
||||
```
|
||||
|
||||
## Output Options
|
||||
|
||||
### Save to File
|
||||
|
||||
```bash
|
||||
# Save output to file
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern summarize -o summary.md
|
||||
|
||||
# Save entire session including input
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern summarize --output-session -o full_session.md
|
||||
```
|
||||
|
||||
### Stream Output
|
||||
|
||||
Get real-time streaming output:
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern summarize --stream
|
||||
```
|
||||
|
||||
## Common Use Cases
|
||||
|
||||
### Content Analysis
|
||||
|
||||
```bash
|
||||
# Analyze video content for key insights
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern extract_wisdom
|
||||
|
||||
# Check claims made in the video
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern analyze_claims
|
||||
```
|
||||
|
||||
### Educational Content
|
||||
|
||||
```bash
|
||||
# Create study notes from educational videos
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern create_study_notes
|
||||
|
||||
# Extract key concepts and definitions
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern extract_concepts
|
||||
```
|
||||
|
||||
### Meeting/Conference Processing
|
||||
|
||||
```bash
|
||||
# Summarize conference talks with timestamps
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" \
|
||||
--transcript-with-timestamps \
|
||||
--pattern meeting_summary
|
||||
|
||||
# Extract action items from recorded meetings
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern extract_action_items
|
||||
```
|
||||
|
||||
### Content Creation
|
||||
|
||||
```bash
|
||||
# Create social media posts from video content
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern create_social_posts
|
||||
|
||||
# Generate blog post from video transcript
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern write_blog_post
|
||||
```
|
||||
|
||||
## Troubleshooting
|
||||
|
||||
### Common Issues
|
||||
|
||||
1. **"yt-dlp not found"**: Install yt-dlp using pip or your package manager
|
||||
2. **Age-restricted videos**: Use `--yt-dlp-args="--cookies-from-browser BROWSER"`
|
||||
3. **No subtitles available**: Some videos don't have auto-generated subtitles
|
||||
4. **API rate limits**: YouTube API has daily quotas for comments/metadata
|
||||
5. **HTTP 429 errors**: YouTube is rate limiting subtitle requests
|
||||
|
||||
### Error Messages
|
||||
|
||||
- **"YouTube is not configured"**: Run `fabric --setup` to configure YouTube API
|
||||
- **"yt-dlp failed"**: Check video URL and try with `--yt-dlp-args` for authentication
|
||||
- **"No transcript content found"**: Video may not have subtitles available
|
||||
- **"HTTP Error 429: Too Many Requests"**: YouTube rate limit exceeded. This is increasingly common. Solutions:
|
||||
- **Wait 10-30 minutes and try again** (most effective)
|
||||
- Use longer sleep: `--yt-dlp-args="--sleep-requests 5"`
|
||||
- Try with browser cookies: `--yt-dlp-args="--cookies-from-browser brave --sleep-requests 5"`
|
||||
- **Try a different video** - some videos are less restricted
|
||||
- **Use a VPN** - different IP address may help
|
||||
- **Try without language specification** - let yt-dlp choose any available language
|
||||
- **Try English instead** - `fabric -g en` (English subtitles may be less rate-limited)
|
||||
|
||||
### Language Fallback Behavior
|
||||
|
||||
When you specify a language (e.g., `-g es` for Spanish) but that language isn't available or fails to download:
|
||||
|
||||
1. **Automatic fallback**: Fabric automatically retries without language specification
|
||||
2. **Smart file detection**: If the fallback downloads a different language (e.g., English), Fabric will automatically detect and use it
|
||||
3. **No manual intervention needed**: The process is transparent to the user
|
||||
|
||||
```bash
|
||||
# Even if Spanish isn't available, this will work with whatever language yt-dlp finds
|
||||
fabric -g es -y "https://youtube.com/watch?v=VIDEO_ID" --pattern summarize
|
||||
```
|
||||
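A sketch of how that fallback could work; the `runYtDlp` helper is hypothetical and the file handling is an assumption based on the behavior described above:

```go
// fetchSubtitles tries the requested language first, then retries without a
// language filter and picks up whatever subtitle file yt-dlp produced.
func fetchSubtitles(videoURL, language string) (string, error) {
	if language != "" {
		if path, err := runYtDlp(videoURL, language); err == nil {
			return path, nil
		}
	}
	// Fallback: no --sub-langs at all, accept any available language.
	if _, err := runYtDlp(videoURL, ""); err != nil {
		return "", err
	}
	// Smart file detection: use whichever subtitle file showed up.
	matches, _ := filepath.Glob("*.vtt")
	if len(matches) == 0 {
		return "", fmt.Errorf("no transcript content found")
	}
	return matches[0], nil
}
```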
|
||||
## Configuration
|
||||
|
||||
### YAML Configuration
|
||||
|
||||
You can set default yt-dlp arguments in your config file (`~/.config/fabric/config.yaml`):
|
||||
|
||||
```yaml
|
||||
ytDlpArgs: "--cookies-from-browser brave --write-info-json"
|
||||
```
|
||||
|
||||
### Environment Variables
|
||||
|
||||
Set up your YouTube API key:
|
||||
|
||||
```bash
|
||||
export FABRIC_YOUTUBE_API_KEY="your_api_key_here"
|
||||
```
|
||||
|
||||
## Tips and Best Practices
|
||||
|
||||
1. **Use specific patterns**: Choose patterns that match your use case for better results
|
||||
2. **Combine with other tools**: Pipe output to other commands or save to files for further processing
|
||||
3. **Batch processing**: Use playlists to process multiple videos efficiently
|
||||
4. **Authentication**: Use browser cookies for accessing private or age-restricted content
|
||||
5. **Language support**: Specify language codes for better transcript accuracy
|
||||
6. **Rate limiting**: If you encounter 429 errors, use `--sleep-requests 1` to slow down requests
|
||||
7. **Persistent settings**: Set common yt-dlp args in your config file to avoid repeating them
|
||||
8. **Argument precedence**: Use `--yt-dlp-args` to override any built-in behavior when needed
|
||||
9. **Testing**: Use `yt-dlp --list-subs URL` to see available subtitle languages before processing
|
||||
|
||||
## Examples
|
||||
|
||||
### Quick Video Summary
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=dQw4w9WgXcQ" --pattern summarize --stream
|
||||
```
|
||||
|
||||
### Detailed Analysis with Authentication
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" \
|
||||
--yt-dlp-args="--cookies-from-browser chrome" \
|
||||
--transcript-with-timestamps \
|
||||
--comments \
|
||||
--pattern comprehensive_analysis \
|
||||
-o analysis.md
|
||||
```
|
||||
|
||||
### Playlist Processing
|
||||
|
||||
```bash
|
||||
fabric -y "https://www.youtube.com/playlist?list=PLrAXtmRdnEQy6nuLvVUxpDnx4C0823vBN" \
|
||||
--playlist \
|
||||
--pattern extract_wisdom \
|
||||
-o playlist_wisdom.md
|
||||
```
|
||||
|
||||
### Override Built-in Language Selection
|
||||
|
||||
```bash
|
||||
# Built-in language selection (-g es) is overridden by user args
|
||||
fabric -g es -y "https://www.youtube.com/watch?v=VIDEO_ID" \
|
||||
--yt-dlp-args="--sub-langs fr,de,en" \
|
||||
--pattern translate
|
||||
```
|
||||
|
||||
For more patterns and advanced usage, see the main [Fabric documentation](../README.md).
|
||||
BIN
docs/images/svelte-preview.png
Normal file
Binary file not shown.
|
Size: 1.1 MiB
21
docs/notification-config.yaml
Normal file
@@ -0,0 +1,21 @@
|
||||
# Example Fabric configuration with notification support
|
||||
# Save this to ~/.config/fabric/config.yaml to use as defaults
|
||||
|
||||
# Enable notifications by default for all commands
|
||||
notification: true
|
||||
|
||||
# Optional: Use a custom notification command
|
||||
# Examples:
|
||||
# macOS with custom sound:
|
||||
# notificationCommand: 'osascript -e "display notification \"$2\" with title \"$1\" sound name \"Ping\""'
|
||||
#
|
||||
# Linux with custom urgency:
|
||||
# notificationCommand: 'notify-send --urgency=normal "$1" "$2"'
|
||||
#
|
||||
# Custom script:
|
||||
# notificationCommand: '/path/to/custom-notification-script.sh "$1" "$2"'
|
||||
|
||||
# Other common settings
|
||||
model: "gpt-4o"
|
||||
temperature: 0.7
|
||||
stream: true
|
||||
36
docs/voices/README.md
Normal file
@@ -0,0 +1,36 @@
|
||||
# Voice Samples
|
||||
|
||||
This directory contains sample audio files demonstrating different Gemini TTS voices.
|
||||
|
||||
## Sample Files
|
||||
|
||||
Each voice sample says "The quick brown fox jumped over the lazy dog" to demonstrate the voice characteristics:
|
||||
|
||||
- **Kore.wav** - Firm and confident (default voice)
|
||||
- **Charon.wav** - Informative and clear
|
||||
- **Vega.wav** - Smooth and pleasant
|
||||
- **Capella.wav** - Warm and welcoming
|
||||
- **Achird.wav** - Friendly and approachable
|
||||
- **Lyra.wav** - Melodic and expressive
|
||||
|
||||
## Generating Samples
|
||||
|
||||
To generate these samples, use the following commands:
|
||||
|
||||
```bash
|
||||
# Generate each voice sample
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Kore -o docs/voices/Kore.wav
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Charon -o docs/voices/Charon.wav
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Vega -o docs/voices/Vega.wav
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Capella -o docs/voices/Capella.wav
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Achird -o docs/voices/Achird.wav
|
||||
echo "The quick brown fox jumped over the lazy dog" | fabric -m gemini-2.5-flash-preview-tts --voice Lyra -o docs/voices/Lyra.wav
|
||||
```
|
||||
|
||||
## Audio Format
|
||||
|
||||
- **Format**: WAV (uncompressed)
|
||||
- **Sample Rate**: 24kHz
|
||||
- **Bit Depth**: 16-bit
|
||||
- **Channels**: Mono
|
||||
- **Approximate Size**: ~500KB per sample
|
||||
16
go.mod
@@ -5,7 +5,7 @@ go 1.24.0
|
||||
toolchain go1.24.2
|
||||
|
||||
require (
|
||||
github.com/anthropics/anthropic-sdk-go v1.4.0
|
||||
github.com/anthropics/anthropic-sdk-go v1.9.1
|
||||
github.com/atotto/clipboard v0.1.4
|
||||
github.com/aws/aws-sdk-go-v2 v1.36.4
|
||||
github.com/aws/aws-sdk-go-v2/config v1.27.27
|
||||
@@ -15,11 +15,11 @@ require (
|
||||
github.com/gin-gonic/gin v1.10.1
|
||||
github.com/go-git/go-git/v5 v5.16.2
|
||||
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612
|
||||
github.com/google/generative-ai-go v0.20.1
|
||||
github.com/google/go-github/v66 v66.0.0
|
||||
github.com/hasura/go-graphql-client v0.14.4
|
||||
github.com/jessevdk/go-flags v1.6.1
|
||||
github.com/joho/godotenv v1.5.1
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
|
||||
github.com/mattn/go-sqlite3 v1.14.28
|
||||
github.com/ollama/ollama v0.9.0
|
||||
github.com/openai/openai-go v1.8.2
|
||||
@@ -35,13 +35,16 @@ require (
|
||||
gopkg.in/yaml.v3 v3.0.1
|
||||
)
|
||||
|
||||
require (
|
||||
github.com/google/go-cmp v0.7.0 // indirect
|
||||
github.com/gorilla/websocket v1.5.3 // indirect
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.121.2 // indirect
|
||||
cloud.google.com/go/ai v0.12.1 // indirect
|
||||
cloud.google.com/go/auth v0.16.2 // indirect
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
|
||||
cloud.google.com/go/compute/metadata v0.7.0 // indirect
|
||||
cloud.google.com/go/longrunning v0.6.7 // indirect
|
||||
dario.cat/mergo v1.0.2 // indirect
|
||||
github.com/Microsoft/go-winio v0.6.2 // indirect
|
||||
github.com/ProtonMail/go-crypto v1.3.0 // indirect
|
||||
@@ -109,18 +112,17 @@ require (
|
||||
github.com/ugorji/go/codec v1.2.14 // indirect
|
||||
github.com/xanzy/ssh-agent v0.3.3 // indirect
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
|
||||
go.opentelemetry.io/otel v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/metric v1.36.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.36.0 // indirect
|
||||
golang.org/x/arch v0.18.0 // indirect
|
||||
golang.org/x/crypto v0.39.0 // indirect
|
||||
golang.org/x/crypto v0.40.0 // indirect
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect
|
||||
golang.org/x/net v0.41.0 // indirect
|
||||
golang.org/x/sync v0.16.0 // indirect
|
||||
golang.org/x/sys v0.34.0 // indirect
|
||||
golang.org/x/time v0.12.0 // indirect
|
||||
google.golang.org/genai v1.17.0
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
|
||||
google.golang.org/grpc v1.73.0 // indirect
|
||||
|
||||
28
go.sum
@@ -1,15 +1,11 @@
|
||||
cloud.google.com/go v0.121.2 h1:v2qQpN6Dx9x2NmwrqlesOt3Ys4ol5/lFZ6Mg1B7OJCg=
|
||||
cloud.google.com/go v0.121.2/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
|
||||
cloud.google.com/go/ai v0.12.1 h1:m1n/VjUuHS+pEO/2R4/VbuuEIkgk0w67fDQvFaMngM0=
|
||||
cloud.google.com/go/ai v0.12.1/go.mod h1:5vIPNe1ZQsVZqCliXIPL4QnhObQQY4d9hAGHdVc4iw4=
|
||||
cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
|
||||
cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
|
||||
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
|
||||
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
|
||||
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
@@ -21,8 +17,8 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk
|
||||
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
|
||||
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
|
||||
github.com/anthropics/anthropic-sdk-go v1.4.0 h1:fU1jKxYbQdQDiEXCxeW5XZRIOwKevn/PMg8Ay1nnUx0=
|
||||
github.com/anthropics/anthropic-sdk-go v1.4.0/go.mod h1:AapDW22irxK2PSumZiQXYUFvsdQgkwIWlpESweWZI/c=
|
||||
github.com/anthropics/anthropic-sdk-go v1.9.1 h1:raRhZKmayVSVZtLpLDd6IsMXvxLeeSU03/2IBTerWlg=
|
||||
github.com/anthropics/anthropic-sdk-go v1.9.1/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
|
||||
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA=
|
||||
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
|
||||
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
|
||||
@@ -126,8 +122,6 @@ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8J
|
||||
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
|
||||
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
|
||||
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
|
||||
github.com/google/generative-ai-go v0.20.1 h1:6dEIujpgN2V0PgLhr6c/M1ynRdc7ARtiIDPFzj45uNQ=
|
||||
github.com/google/generative-ai-go v0.20.1/go.mod h1:TjOnZJmZKzarWbjUJgy+r3Ee7HGBRVLhOIgupnwR4Bg=
|
||||
github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
|
||||
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
|
||||
@@ -145,6 +139,8 @@ github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU
|
||||
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
|
||||
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
|
||||
github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg=
|
||||
github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE=
|
||||
github.com/hasura/go-graphql-client v0.14.4 h1:bYU7/+V50T2YBGdNQXt6l4f2cMZPECPUd8cyCR+ixtw=
|
||||
github.com/hasura/go-graphql-client v0.14.4/go.mod h1:jfSZtBER3or+88Q9vFhWHiFMPppfYILRyl+0zsgPIIw=
|
||||
github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8=
|
||||
@@ -157,6 +153,8 @@ github.com/joho/godotenv v1.5.1 h1:7eLL/+HRGLY0ldzfGMeQkb7vMd0as4CfYvUVzLqw0N0=
|
||||
github.com/joho/godotenv v1.5.1/go.mod h1:f4LDr5Voq0i2e/R5DDNOoa2zzDfwtkZa6DnEwAbqwq4=
|
||||
github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNUXsshfwJMBgNA0RU6/i7WVaAegv3PtuIHPMs=
|
||||
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
|
||||
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
|
||||
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
|
||||
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
|
||||
@@ -249,8 +247,6 @@ github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI
|
||||
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
|
||||
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
|
||||
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
|
||||
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
|
||||
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
|
||||
@@ -272,8 +268,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
|
||||
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
|
||||
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
|
||||
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
|
||||
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
|
||||
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
|
||||
golang.org/x/crypto v0.40.0 h1:r4x+VvoG5Fm+eJcxMaY8CQM7Lb0l1lsmjGBQ6s8BfKM=
|
||||
golang.org/x/crypto v0.40.0/go.mod h1:Qr1vMER5WyS2dfPHAlsOj01wgLbsyWtFn/aY+5+ZdxY=
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA=
|
||||
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
|
||||
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
|
||||
@@ -331,8 +327,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
|
||||
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
|
||||
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
|
||||
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
|
||||
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
|
||||
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
|
||||
golang.org/x/term v0.33.0 h1:NuFncQrRcaRvVmgRkvM3j/F00gWIAlcmlB8ACEKmGIg=
|
||||
golang.org/x/term v0.33.0/go.mod h1:s18+ql9tYWp1IfpV9DmCtQDDSRBUjKaw9M1eAv5UeF0=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
@@ -345,8 +341,6 @@ golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
|
||||
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
|
||||
golang.org/x/text v0.27.0 h1:4fGWRpyh641NLlecmyl4LOe6yDdfaYNrGb2zdfo4JV4=
|
||||
golang.org/x/text v0.27.0/go.mod h1:1D28KMCvyooCX9hBiosv5Tz/+YLxj0j7XhWjpSUF7CU=
|
||||
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
|
||||
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
|
||||
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
|
||||
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
|
||||
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
|
||||
@@ -357,6 +351,8 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T
|
||||
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
|
||||
google.golang.org/api v0.236.0 h1:CAiEiDVtO4D/Qja2IA9VzlFrgPnK3XVMmRoJZlSWbc0=
|
||||
google.golang.org/api v0.236.0/go.mod h1:X1WF9CU2oTc+Jml1tiIxGmWFK/UZezdqEu09gcxZAj4=
|
||||
google.golang.org/genai v1.17.0 h1:lXYSnWShPYjxTouxRj0zF8RsNmSF+SKo7SQ7dM35NlI=
|
||||
google.golang.org/genai v1.17.0/go.mod h1:QPj5NGJw+3wEOHg+PrsWwJKvG6UC84ex5FR7qAYsN/M=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
|
||||
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
|
||||
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
|
||||
|
||||
@@ -3,11 +3,14 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
"github.com/danielmiessler/fabric/internal/tools/notifications"
|
||||
)
|
||||
|
||||
// handleChatProcessing handles the main chat processing logic
|
||||
@@ -18,7 +21,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
|
||||
var chatter *core.Chatter
|
||||
if chatter, err = registry.GetChatter(currentFlags.Model, currentFlags.ModelContextLength,
|
||||
currentFlags.Strategy, currentFlags.Stream, currentFlags.DryRun); err != nil {
|
||||
currentFlags.Vendor, currentFlags.Strategy, currentFlags.Stream, currentFlags.DryRun); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
@@ -35,6 +38,40 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
if chatOptions, err = currentFlags.BuildChatOptions(); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
// Check if user is requesting audio output or using a TTS model
|
||||
isAudioOutput := currentFlags.Output != "" && IsAudioFormat(currentFlags.Output)
|
||||
isTTSModel := isTTSModel(currentFlags.Model)
|
||||
|
||||
if isTTSModel && !isAudioOutput {
|
||||
err = fmt.Errorf("TTS model '%s' requires audio output. Please specify an audio output file with -o flag (e.g., -o output.wav)", currentFlags.Model)
|
||||
return
|
||||
}
|
||||
|
||||
if isAudioOutput && !isTTSModel {
|
||||
err = fmt.Errorf("audio output file '%s' specified but model '%s' is not a TTS model. Please use a TTS model like gemini-2.5-flash-preview-tts", currentFlags.Output, currentFlags.Model)
|
||||
return
|
||||
}
|
||||
|
||||
// For TTS models, check if output file already exists BEFORE processing
|
||||
if isTTSModel && isAudioOutput {
|
||||
outputFile := currentFlags.Output
|
||||
// Add .wav extension if not provided
|
||||
if filepath.Ext(outputFile) == "" {
|
||||
outputFile += ".wav"
|
||||
}
|
||||
if _, err = os.Stat(outputFile); err == nil {
|
||||
err = fmt.Errorf("file %s already exists. Please choose a different filename or remove the existing file", outputFile)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Set audio options in chat config
|
||||
chatOptions.AudioOutput = isAudioOutput
|
||||
if isAudioOutput {
|
||||
chatOptions.AudioFormat = "wav" // Default to WAV format
|
||||
}
|
||||
|
||||
if session, err = chatter.Send(chatReq, chatOptions); err != nil {
|
||||
return
|
||||
}
|
||||
@@ -42,8 +79,13 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
result := session.GetLastMessage().Content
|
||||
|
||||
if !currentFlags.Stream || currentFlags.SuppressThink {
|
||||
// print the result if it was not streamed already or suppress-think disabled streaming output
|
||||
fmt.Println(result)
|
||||
// For TTS models with audio output, show a user-friendly message instead of raw data
|
||||
if isTTSModel && isAudioOutput && strings.HasPrefix(result, "FABRIC_AUDIO_DATA:") {
|
||||
fmt.Printf("TTS audio generated successfully and saved to: %s\n", currentFlags.Output)
|
||||
} else {
|
||||
// print the result if it was not streamed already or suppress-think disabled streaming output
|
||||
fmt.Println(result)
|
||||
}
|
||||
}
|
||||
|
||||
// if the copy flag is set, copy the message to the clipboard
|
||||
@@ -59,8 +101,85 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
sessionAsString := session.String()
|
||||
err = CreateOutputFile(sessionAsString, currentFlags.Output)
|
||||
} else {
|
||||
err = CreateOutputFile(result, currentFlags.Output)
|
||||
// For TTS models, we need to handle audio output differently
|
||||
if isTTSModel && isAudioOutput {
|
||||
// Check if result contains actual audio data
|
||||
if strings.HasPrefix(result, "FABRIC_AUDIO_DATA:") {
|
||||
// Extract the binary audio data
|
||||
audioData := result[len("FABRIC_AUDIO_DATA:"):]
|
||||
err = CreateAudioOutputFile([]byte(audioData), currentFlags.Output)
|
||||
} else {
|
||||
// Fallback for any error messages or unexpected responses
|
||||
err = CreateOutputFile(result, currentFlags.Output)
|
||||
}
|
||||
} else {
|
||||
err = CreateOutputFile(result, currentFlags.Output)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send notification if requested
|
||||
if chatOptions.Notification {
|
||||
if err = sendNotification(chatOptions, chatReq.PatternName, result); err != nil {
|
||||
// Log notification error but don't fail the main command
|
||||
fmt.Fprintf(os.Stderr, "Failed to send notification: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
// sendNotification sends a desktop notification about command completion.
|
||||
//
|
||||
// When truncating the result for notification display, this function counts Unicode code points,
|
||||
// not grapheme clusters. As a result, complex emoji or accented characters with multiple combining
|
||||
// characters may be truncated improperly. This is a limitation of the current implementation.
|
||||
func sendNotification(options *domain.ChatOptions, patternName, result string) error {
|
||||
title := "Fabric Command Complete"
|
||||
if patternName != "" {
|
||||
title = fmt.Sprintf("Fabric: %s Complete", patternName)
|
||||
}
|
||||
|
||||
// Limit message length for notification display (counts Unicode code points)
|
||||
message := "Command completed successfully"
|
||||
if result != "" {
|
||||
maxLength := 100
|
||||
runes := []rune(result)
|
||||
if len(runes) > maxLength {
|
||||
message = fmt.Sprintf("Output: %s...", string(runes[:maxLength]))
|
||||
} else {
|
||||
message = fmt.Sprintf("Output: %s", result)
|
||||
}
|
||||
// Clean up newlines for notification display
|
||||
message = strings.ReplaceAll(message, "\n", " ")
|
||||
}
|
||||
|
||||
// Use custom notification command if provided
|
||||
if options.NotificationCommand != "" {
|
||||
// SECURITY: Pass title and message as proper shell positional arguments $1 and $2
|
||||
// This matches the documented interface where custom commands receive title and message as shell variables
|
||||
cmd := exec.Command("sh", "-c", options.NotificationCommand+" \"$1\" \"$2\"", "--", title, message)
|
||||
|
||||
// For debugging: capture and display output from custom commands
|
||||
cmd.Stdout = os.Stdout
|
||||
cmd.Stderr = os.Stderr
|
||||
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
// Use built-in notification system
|
||||
notificationManager := notifications.NewNotificationManager()
|
||||
if !notificationManager.IsAvailable() {
|
||||
return fmt.Errorf("no notification system available")
|
||||
}
|
||||
|
||||
return notificationManager.Send(title, message)
|
||||
}
|
||||
|
||||
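As a self-contained sketch of the quoting pattern above (the command string, title and message are illustrative, and a POSIX sh is assumed): the user-supplied strings are bound to $1 and $2 after the `--` separator instead of being spliced into the command text, so shell metacharacters in them stay inert.

```go
package main

import (
	"fmt"
	"os/exec"
)

func main() {
	title := "Fabric: summarize Complete"
	message := "Output: `whoami`; echo INJECTED" // hostile content stays plain data
	// The custom command only ever sees the values through $1 and $2; nothing
	// from title or message is interpolated into the -c script itself.
	cmd := exec.Command("sh", "-c", `echo "Title: $1, Message: $2"`, "--", title, message)
	out, err := cmd.Output()
	if err != nil {
		panic(err)
	}
	fmt.Print(string(out)) // prints the backticks and semicolon literally; nothing executes
}
```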
// isTTSModel checks if the model is a text-to-speech model
|
||||
func isTTSModel(modelName string) bool {
|
||||
lowerModel := strings.ToLower(modelName)
|
||||
return strings.Contains(lowerModel, "tts") ||
|
||||
strings.Contains(lowerModel, "preview-tts") ||
|
||||
strings.Contains(lowerModel, "text-to-speech")
|
||||
}
|
||||
|
||||
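A hypothetical example test (not part of this change set; the model names are examples only) showing how isTTSModel classifies a few model identifiers:

```go
package cli

import "fmt"

// Hypothetical example exercising isTTSModel as defined above.
func ExampleIsTTSModel() {
	fmt.Println(isTTSModel("gemini-2.5-flash-preview-tts")) // contains "tts"
	fmt.Println(isTTSModel("en-US-Text-To-Speech"))         // case-insensitive "text-to-speech"
	fmt.Println(isTTSModel("gpt-4o"))                       // no TTS marker in the name
	// Output:
	// true
	// true
	// false
}
```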
internal/cli/chat_test.go (new file)
@@ -0,0 +1,166 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
)
|
||||
|
||||
func TestSendNotification_SecurityEscaping(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
title string
|
||||
message string
|
||||
command string
|
||||
expectError bool
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "Normal content",
|
||||
title: "Test Title",
|
||||
message: "Test message content",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Normal content should work fine",
|
||||
},
|
||||
{
|
||||
name: "Content with backticks",
|
||||
title: "Test Title",
|
||||
message: "Test `whoami` injection",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Backticks should be escaped and not executed",
|
||||
},
|
||||
{
|
||||
name: "Content with semicolon injection",
|
||||
title: "Test Title",
|
||||
message: "Test; echo INJECTED; echo end",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Semicolon injection should be prevented",
|
||||
},
|
||||
{
|
||||
name: "Content with command substitution",
|
||||
title: "Test Title",
|
||||
message: "Test $(whoami) injection",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Command substitution should be escaped",
|
||||
},
|
||||
{
|
||||
name: "Content with quote injection",
|
||||
title: "Test Title",
|
||||
message: "Test ' || echo INJECTED || echo ' end",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Quote injection should be prevented",
|
||||
},
|
||||
{
|
||||
name: "Content with newlines",
|
||||
title: "Test Title",
|
||||
message: "Line 1\nLine 2\nLine 3",
|
||||
command: `echo "Title: $1, Message: $2"`,
|
||||
expectError: false,
|
||||
description: "Newlines should be handled safely",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
options := &domain.ChatOptions{
|
||||
NotificationCommand: tt.command,
|
||||
Notification: true,
|
||||
}
|
||||
|
||||
// This test mainly verifies that the function doesn't panic
|
||||
// and properly escapes dangerous content. The actual command
|
||||
// execution is tested separately in integration tests.
|
||||
err := sendNotification(options, "test_pattern", tt.message)
|
||||
|
||||
if tt.expectError && err == nil {
|
||||
t.Errorf("Expected error for %s, but got none", tt.description)
|
||||
}
|
||||
|
||||
if !tt.expectError && err != nil {
|
||||
t.Errorf("Unexpected error for %s: %v", tt.description, err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendNotification_TitleGeneration(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
patternName string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "No pattern name",
|
||||
patternName: "",
|
||||
expected: "Fabric Command Complete",
|
||||
},
|
||||
{
|
||||
name: "With pattern name",
|
||||
patternName: "summarize",
|
||||
expected: "Fabric: summarize Complete",
|
||||
},
|
||||
{
|
||||
name: "Pattern with special characters",
|
||||
patternName: "test_pattern-v2",
|
||||
expected: "Fabric: test_pattern-v2 Complete",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
options := &domain.ChatOptions{
|
||||
NotificationCommand: `echo "Title: $1"`,
|
||||
Notification: true,
|
||||
}
|
||||
|
||||
// We're testing the title generation logic
|
||||
// The actual notification command would echo the title
|
||||
err := sendNotification(options, tt.patternName, "test message")
|
||||
|
||||
// The function should not error for valid inputs
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendNotification_MessageTruncation(t *testing.T) {
|
||||
longMessage := strings.Repeat("A", 150) // 150 characters
|
||||
shortMessage := "Short message"
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
message string
|
||||
expected string
|
||||
}{
|
||||
{
|
||||
name: "Short message",
|
||||
message: shortMessage,
|
||||
},
|
||||
{
|
||||
name: "Long message truncation",
|
||||
message: longMessage,
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
options := &domain.ChatOptions{
|
||||
NotificationCommand: `echo "Message: $2"`,
|
||||
Notification: true,
|
||||
}
|
||||
|
||||
err := sendNotification(options, "test", tt.message)
|
||||
if err != nil {
|
||||
t.Errorf("Unexpected error: %v", err)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
@@ -113,11 +113,11 @@ func processYoutubeVideo(
|
||||
}
|
||||
}
|
||||
if flags.YouTubeTranscriptWithTimestamps {
|
||||
if transcript, err = registry.YouTube.GrabTranscriptWithTimestamps(videoId, language); err != nil {
|
||||
if transcript, err = registry.YouTube.GrabTranscriptWithTimestampsWithArgs(videoId, language, flags.YtDlpArgs); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
if transcript, err = registry.YouTube.GrabTranscript(videoId, language); err != nil {
|
||||
if transcript, err = registry.YouTube.GrabTranscriptWithArgs(videoId, language, flags.YtDlpArgs); err != nil {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
@@ -20,73 +20,82 @@ import (
|
||||
)
|
||||
|
||||
// Flags create flags struct. the users flags go into this, this will be passed to the chat struct in cli
|
||||
// Chat parameter defaults set in the struct tags must match domain.Default* constants
|
||||
|
||||
type Flags struct {
|
||||
Pattern string `short:"p" long:"pattern" yaml:"pattern" description:"Choose a pattern from the available patterns" default:""`
|
||||
PatternVariables map[string]string `short:"v" long:"variable" description:"Values for pattern variables, e.g. -v=#role:expert -v=#points:30"`
|
||||
Context string `short:"C" long:"context" description:"Choose a context from the available contexts" default:""`
|
||||
Session string `long:"session" description:"Choose a session from the available sessions"`
|
||||
Attachments []string `short:"a" long:"attachment" description:"Attachment path or URL (e.g. for OpenAI image recognition messages)"`
|
||||
Setup bool `short:"S" long:"setup" description:"Run setup for all reconfigurable parts of fabric"`
|
||||
Temperature float64 `short:"t" long:"temperature" yaml:"temperature" description:"Set temperature" default:"0.7"`
|
||||
TopP float64 `short:"T" long:"topp" yaml:"topp" description:"Set top P" default:"0.9"`
|
||||
Stream bool `short:"s" long:"stream" yaml:"stream" description:"Stream"`
|
||||
PresencePenalty float64 `short:"P" long:"presencepenalty" yaml:"presencepenalty" description:"Set presence penalty" default:"0.0"`
|
||||
Raw bool `short:"r" long:"raw" yaml:"raw" description:"Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns."`
|
||||
FrequencyPenalty float64 `short:"F" long:"frequencypenalty" yaml:"frequencypenalty" description:"Set frequency penalty" default:"0.0"`
|
||||
ListPatterns bool `short:"l" long:"listpatterns" description:"List all patterns"`
|
||||
ListAllModels bool `short:"L" long:"listmodels" description:"List all available models"`
|
||||
ListAllContexts bool `short:"x" long:"listcontexts" description:"List all contexts"`
|
||||
ListAllSessions bool `short:"X" long:"listsessions" description:"List all sessions"`
|
||||
UpdatePatterns bool `short:"U" long:"updatepatterns" description:"Update patterns"`
|
||||
Message string `hidden:"true" description:"Messages to send to chat"`
|
||||
Copy bool `short:"c" long:"copy" description:"Copy to clipboard"`
|
||||
Model string `short:"m" long:"model" yaml:"model" description:"Choose model"`
|
||||
ModelContextLength int `long:"modelContextLength" yaml:"modelContextLength" description:"Model context length (only affects ollama)"`
|
||||
Output string `short:"o" long:"output" description:"Output to file" default:""`
|
||||
OutputSession bool `long:"output-session" description:"Output the entire session (also a temporary one) to the output file"`
|
||||
LatestPatterns string `short:"n" long:"latest" description:"Number of latest patterns to list" default:"0"`
|
||||
ChangeDefaultModel bool `short:"d" long:"changeDefaultModel" description:"Change default model"`
|
||||
YouTube string `short:"y" long:"youtube" description:"YouTube video or play list \"URL\" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file"`
|
||||
YouTubePlaylist bool `long:"playlist" description:"Prefer playlist over video if both ids are present in the URL"`
|
||||
YouTubeTranscript bool `long:"transcript" description:"Grab transcript from YouTube video and send to chat (it is used per default)."`
|
||||
YouTubeTranscriptWithTimestamps bool `long:"transcript-with-timestamps" description:"Grab transcript from YouTube video with timestamps and send to chat"`
|
||||
YouTubeComments bool `long:"comments" description:"Grab comments from YouTube video and send to chat"`
|
||||
YouTubeMetadata bool `long:"metadata" description:"Output video metadata"`
|
||||
Language string `short:"g" long:"language" description:"Specify the Language Code for the chat, e.g. -g=en -g=zh" default:""`
|
||||
ScrapeURL string `short:"u" long:"scrape_url" description:"Scrape website URL to markdown using Jina AI"`
|
||||
ScrapeQuestion string `short:"q" long:"scrape_question" description:"Search question using Jina AI"`
|
||||
Seed int `short:"e" long:"seed" yaml:"seed" description:"Seed to be used for LMM generation"`
|
||||
WipeContext string `short:"w" long:"wipecontext" description:"Wipe context"`
|
||||
WipeSession string `short:"W" long:"wipesession" description:"Wipe session"`
|
||||
PrintContext string `long:"printcontext" description:"Print context"`
|
||||
PrintSession string `long:"printsession" description:"Print session"`
|
||||
HtmlReadability bool `long:"readability" description:"Convert HTML input into a clean, readable view"`
|
||||
InputHasVars bool `long:"input-has-vars" description:"Apply variables to user input"`
|
||||
DryRun bool `long:"dry-run" description:"Show what would be sent to the model without actually sending it"`
|
||||
Serve bool `long:"serve" description:"Serve the Fabric Rest API"`
|
||||
ServeOllama bool `long:"serveOllama" description:"Serve the Fabric Rest API with ollama endpoints"`
|
||||
ServeAddress string `long:"address" description:"The address to bind the REST API" default:":8080"`
|
||||
ServeAPIKey string `long:"api-key" description:"API key used to secure server routes" default:""`
|
||||
Config string `long:"config" description:"Path to YAML config file"`
|
||||
Version bool `long:"version" description:"Print current version"`
|
||||
ListExtensions bool `long:"listextensions" description:"List all registered extensions"`
|
||||
AddExtension string `long:"addextension" description:"Register a new extension from config file path"`
|
||||
RemoveExtension string `long:"rmextension" description:"Remove a registered extension by name"`
|
||||
Strategy string `long:"strategy" description:"Choose a strategy from the available strategies" default:""`
|
||||
ListStrategies bool `long:"liststrategies" description:"List all strategies"`
|
||||
ListVendors bool `long:"listvendors" description:"List all vendors"`
|
||||
ShellCompleteOutput bool `long:"shell-complete-list" description:"Output raw list without headers/formatting (for shell completion)"`
|
||||
Search bool `long:"search" description:"Enable web search tool for supported models (Anthropic, OpenAI)"`
|
||||
SearchLocation string `long:"search-location" description:"Set location for web search results (e.g., 'America/Los_Angeles')"`
|
||||
ImageFile string `long:"image-file" description:"Save generated image to specified file path (e.g., 'output.png')"`
|
||||
ImageSize string `long:"image-size" description:"Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)"`
|
||||
ImageQuality string `long:"image-quality" description:"Image quality: low, medium, high, auto (default: auto)"`
|
||||
ImageCompression int `long:"image-compression" description:"Compression level 0-100 for JPEG/WebP formats (default: not set)"`
|
||||
ImageBackground string `long:"image-background" description:"Background type: opaque, transparent (default: opaque, only for PNG/WebP)"`
|
||||
SuppressThink bool `long:"suppress-think" yaml:"suppressThink" description:"Suppress text enclosed in thinking tags"`
|
||||
ThinkStartTag string `long:"think-start-tag" yaml:"thinkStartTag" description:"Start tag for thinking sections" default:"<think>"`
|
||||
ThinkEndTag string `long:"think-end-tag" yaml:"thinkEndTag" description:"End tag for thinking sections" default:"</think>"`
|
||||
DisableResponsesAPI bool `long:"disable-responses-api" yaml:"disableResponsesAPI" description:"Disable OpenAI Responses API (default: false)"`
|
||||
Pattern string `short:"p" long:"pattern" yaml:"pattern" description:"Choose a pattern from the available patterns" default:""`
|
||||
PatternVariables map[string]string `short:"v" long:"variable" description:"Values for pattern variables, e.g. -v=#role:expert -v=#points:30"`
|
||||
Context string `short:"C" long:"context" description:"Choose a context from the available contexts" default:""`
|
||||
Session string `long:"session" description:"Choose a session from the available sessions"`
|
||||
Attachments []string `short:"a" long:"attachment" description:"Attachment path or URL (e.g. for OpenAI image recognition messages)"`
|
||||
Setup bool `short:"S" long:"setup" description:"Run setup for all reconfigurable parts of fabric"`
|
||||
Temperature float64 `short:"t" long:"temperature" yaml:"temperature" description:"Set temperature" default:"0.7"`
|
||||
TopP float64 `short:"T" long:"topp" yaml:"topp" description:"Set top P" default:"0.9"`
|
||||
Stream bool `short:"s" long:"stream" yaml:"stream" description:"Stream"`
|
||||
PresencePenalty float64 `short:"P" long:"presencepenalty" yaml:"presencepenalty" description:"Set presence penalty" default:"0.0"`
|
||||
Raw bool `short:"r" long:"raw" yaml:"raw" description:"Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns."`
|
||||
FrequencyPenalty float64 `short:"F" long:"frequencypenalty" yaml:"frequencypenalty" description:"Set frequency penalty" default:"0.0"`
|
||||
ListPatterns bool `short:"l" long:"listpatterns" description:"List all patterns"`
|
||||
ListAllModels bool `short:"L" long:"listmodels" description:"List all available models"`
|
||||
ListAllContexts bool `short:"x" long:"listcontexts" description:"List all contexts"`
|
||||
ListAllSessions bool `short:"X" long:"listsessions" description:"List all sessions"`
|
||||
UpdatePatterns bool `short:"U" long:"updatepatterns" description:"Update patterns"`
|
||||
Message string `hidden:"true" description:"Messages to send to chat"`
|
||||
Copy bool `short:"c" long:"copy" description:"Copy to clipboard"`
|
||||
Model string `short:"m" long:"model" yaml:"model" description:"Choose model"`
|
||||
Vendor string `short:"V" long:"vendor" yaml:"vendor" description:"Specify vendor for the selected model (e.g., -V \"LM Studio\" -m openai/gpt-oss-20b)"`
|
||||
ModelContextLength int `long:"modelContextLength" yaml:"modelContextLength" description:"Model context length (only affects ollama)"`
|
||||
Output string `short:"o" long:"output" description:"Output to file" default:""`
|
||||
OutputSession bool `long:"output-session" description:"Output the entire session (also a temporary one) to the output file"`
|
||||
LatestPatterns string `short:"n" long:"latest" description:"Number of latest patterns to list" default:"0"`
|
||||
ChangeDefaultModel bool `short:"d" long:"changeDefaultModel" description:"Change default model"`
|
||||
YouTube string `short:"y" long:"youtube" description:"YouTube video or play list \"URL\" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file"`
|
||||
YouTubePlaylist bool `long:"playlist" description:"Prefer playlist over video if both ids are present in the URL"`
|
||||
YouTubeTranscript bool `long:"transcript" description:"Grab transcript from YouTube video and send to chat (it is used per default)."`
|
||||
YouTubeTranscriptWithTimestamps bool `long:"transcript-with-timestamps" description:"Grab transcript from YouTube video with timestamps and send to chat"`
|
||||
YouTubeComments bool `long:"comments" description:"Grab comments from YouTube video and send to chat"`
|
||||
YouTubeMetadata bool `long:"metadata" description:"Output video metadata"`
|
||||
YtDlpArgs string `long:"yt-dlp-args" yaml:"ytDlpArgs" description:"Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')"`
|
||||
Language string `short:"g" long:"language" description:"Specify the Language Code for the chat, e.g. -g=en -g=zh" default:""`
|
||||
ScrapeURL string `short:"u" long:"scrape_url" description:"Scrape website URL to markdown using Jina AI"`
|
||||
ScrapeQuestion string `short:"q" long:"scrape_question" description:"Search question using Jina AI"`
|
||||
Seed int `short:"e" long:"seed" yaml:"seed" description:"Seed to be used for LMM generation"`
|
||||
WipeContext string `short:"w" long:"wipecontext" description:"Wipe context"`
|
||||
WipeSession string `short:"W" long:"wipesession" description:"Wipe session"`
|
||||
PrintContext string `long:"printcontext" description:"Print context"`
|
||||
PrintSession string `long:"printsession" description:"Print session"`
|
||||
HtmlReadability bool `long:"readability" description:"Convert HTML input into a clean, readable view"`
|
||||
InputHasVars bool `long:"input-has-vars" description:"Apply variables to user input"`
|
||||
DryRun bool `long:"dry-run" description:"Show what would be sent to the model without actually sending it"`
|
||||
Serve bool `long:"serve" description:"Serve the Fabric Rest API"`
|
||||
ServeOllama bool `long:"serveOllama" description:"Serve the Fabric Rest API with ollama endpoints"`
|
||||
ServeAddress string `long:"address" description:"The address to bind the REST API" default:":8080"`
|
||||
ServeAPIKey string `long:"api-key" description:"API key used to secure server routes" default:""`
|
||||
Config string `long:"config" description:"Path to YAML config file"`
|
||||
Version bool `long:"version" description:"Print current version"`
|
||||
ListExtensions bool `long:"listextensions" description:"List all registered extensions"`
|
||||
AddExtension string `long:"addextension" description:"Register a new extension from config file path"`
|
||||
RemoveExtension string `long:"rmextension" description:"Remove a registered extension by name"`
|
||||
Strategy string `long:"strategy" description:"Choose a strategy from the available strategies" default:""`
|
||||
ListStrategies bool `long:"liststrategies" description:"List all strategies"`
|
||||
ListVendors bool `long:"listvendors" description:"List all vendors"`
|
||||
ShellCompleteOutput bool `long:"shell-complete-list" description:"Output raw list without headers/formatting (for shell completion)"`
|
||||
Search bool `long:"search" description:"Enable web search tool for supported models (Anthropic, OpenAI, Gemini)"`
|
||||
SearchLocation string `long:"search-location" description:"Set location for web search results (e.g., 'America/Los_Angeles')"`
|
||||
ImageFile string `long:"image-file" description:"Save generated image to specified file path (e.g., 'output.png')"`
|
||||
ImageSize string `long:"image-size" description:"Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)"`
|
||||
ImageQuality string `long:"image-quality" description:"Image quality: low, medium, high, auto (default: auto)"`
|
||||
ImageCompression int `long:"image-compression" description:"Compression level 0-100 for JPEG/WebP formats (default: not set)"`
|
||||
ImageBackground string `long:"image-background" description:"Background type: opaque, transparent (default: opaque, only for PNG/WebP)"`
|
||||
SuppressThink bool `long:"suppress-think" yaml:"suppressThink" description:"Suppress text enclosed in thinking tags"`
|
||||
ThinkStartTag string `long:"think-start-tag" yaml:"thinkStartTag" description:"Start tag for thinking sections" default:"<think>"`
|
||||
ThinkEndTag string `long:"think-end-tag" yaml:"thinkEndTag" description:"End tag for thinking sections" default:"</think>"`
|
||||
DisableResponsesAPI bool `long:"disable-responses-api" yaml:"disableResponsesAPI" description:"Disable OpenAI Responses API (default: false)"`
|
||||
Voice string `long:"voice" yaml:"voice" description:"TTS voice name for supported models (e.g., Kore, Charon, Puck)" default:"Kore"`
|
||||
ListGeminiVoices bool `long:"list-gemini-voices" description:"List all available Gemini TTS voices"`
|
||||
Notification bool `long:"notification" yaml:"notification" description:"Send desktop notification when command completes"`
|
||||
NotificationCommand string `long:"notification-command" yaml:"notificationCommand" description:"Custom command to run for notifications (overrides built-in notifications)"`
|
||||
Thinking domain.ThinkingLevel `long:"thinking" yaml:"thinking" description:"Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)"`
|
||||
}
|
||||
|
||||
var debug = false
|
||||
@@ -423,24 +432,28 @@ func (o *Flags) BuildChatOptions() (ret *domain.ChatOptions, err error) {
|
||||
}
|
||||
|
||||
ret = &domain.ChatOptions{
|
||||
Model: o.Model,
|
||||
Temperature: o.Temperature,
|
||||
TopP: o.TopP,
|
||||
PresencePenalty: o.PresencePenalty,
|
||||
FrequencyPenalty: o.FrequencyPenalty,
|
||||
Raw: o.Raw,
|
||||
Seed: o.Seed,
|
||||
ModelContextLength: o.ModelContextLength,
|
||||
Search: o.Search,
|
||||
SearchLocation: o.SearchLocation,
|
||||
ImageFile: o.ImageFile,
|
||||
ImageSize: o.ImageSize,
|
||||
ImageQuality: o.ImageQuality,
|
||||
ImageCompression: o.ImageCompression,
|
||||
ImageBackground: o.ImageBackground,
|
||||
SuppressThink: o.SuppressThink,
|
||||
ThinkStartTag: startTag,
|
||||
ThinkEndTag: endTag,
|
||||
Model: o.Model,
|
||||
Temperature: o.Temperature,
|
||||
TopP: o.TopP,
|
||||
PresencePenalty: o.PresencePenalty,
|
||||
FrequencyPenalty: o.FrequencyPenalty,
|
||||
Raw: o.Raw,
|
||||
Seed: o.Seed,
|
||||
Thinking: o.Thinking,
|
||||
ModelContextLength: o.ModelContextLength,
|
||||
Search: o.Search,
|
||||
SearchLocation: o.SearchLocation,
|
||||
ImageFile: o.ImageFile,
|
||||
ImageSize: o.ImageSize,
|
||||
ImageQuality: o.ImageQuality,
|
||||
ImageCompression: o.ImageCompression,
|
||||
ImageBackground: o.ImageBackground,
|
||||
SuppressThink: o.SuppressThink,
|
||||
ThinkStartTag: startTag,
|
||||
ThinkEndTag: endTag,
|
||||
Voice: o.Voice,
|
||||
Notification: o.Notification || o.NotificationCommand != "",
|
||||
NotificationCommand: o.NotificationCommand,
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
@@ -64,6 +64,7 @@ func TestBuildChatOptions(t *testing.T) {
|
||||
FrequencyPenalty: 0.2,
|
||||
Raw: false,
|
||||
Seed: 1,
|
||||
Thinking: domain.ThinkingLevel(""),
|
||||
SuppressThink: false,
|
||||
ThinkStartTag: "<think>",
|
||||
ThinkEndTag: "</think>",
|
||||
@@ -88,6 +89,7 @@ func TestBuildChatOptionsDefaultSeed(t *testing.T) {
|
||||
FrequencyPenalty: 0.2,
|
||||
Raw: false,
|
||||
Seed: 0,
|
||||
Thinking: domain.ThinkingLevel(""),
|
||||
SuppressThink: false,
|
||||
ThinkStartTag: "<think>",
|
||||
ThinkEndTag: "</think>",
|
||||
|
||||
@@ -1,11 +1,13 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"strconv"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/gemini"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
)
|
||||
|
||||
@@ -34,7 +36,11 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
|
||||
if models, err = registry.VendorManager.GetModels(); err != nil {
|
||||
return true, err
|
||||
}
|
||||
models.Print(currentFlags.ShellCompleteOutput)
|
||||
if currentFlags.ShellCompleteOutput {
|
||||
models.Print(true)
|
||||
} else {
|
||||
models.PrintWithVendor(false)
|
||||
}
|
||||
return true, nil
|
||||
}
|
||||
|
||||
@@ -58,5 +64,11 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
|
||||
return true, err
|
||||
}
|
||||
|
||||
if currentFlags.ListGeminiVoices {
|
||||
voicesList := gemini.ListGeminiVoices(currentFlags.ShellCompleteOutput)
|
||||
fmt.Print(voicesList)
|
||||
return true, nil
|
||||
}
|
||||
|
||||
return false, nil
|
||||
}
|
||||
|
||||
@@ -3,6 +3,8 @@ package cli
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/atotto/clipboard"
|
||||
)
|
||||
@@ -15,6 +17,10 @@ func CopyToClipboard(message string) (err error) {
|
||||
}
|
||||
|
||||
func CreateOutputFile(message string, fileName string) (err error) {
|
||||
if _, err = os.Stat(fileName); err == nil {
|
||||
err = fmt.Errorf("file %s already exists, not overwriting. Rename the existing file or choose a different name", fileName)
|
||||
return
|
||||
}
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating file: %v", err)
|
||||
@@ -24,7 +30,41 @@ func CreateOutputFile(message string, fileName string) (err error) {
|
||||
if _, err = file.WriteString(message); err != nil {
|
||||
err = fmt.Errorf("error writing to file: %v", err)
|
||||
} else {
|
||||
fmt.Printf("\n\n... written to %s\n", fileName)
|
||||
fmt.Fprintf(os.Stderr, "\n\n[Output also written to %s]\n", fileName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// CreateAudioOutputFile creates a binary file for audio data
|
||||
func CreateAudioOutputFile(audioData []byte, fileName string) (err error) {
|
||||
// If no extension is provided, default to .wav
|
||||
if filepath.Ext(fileName) == "" {
|
||||
fileName += ".wav"
|
||||
}
|
||||
|
||||
// File existence check is now done in the CLI layer before TTS generation
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating audio file: %v", err)
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err = file.Write(audioData); err != nil {
|
||||
err = fmt.Errorf("error writing audio data to file: %v", err)
|
||||
}
|
||||
// No redundant output message here - the CLI layer handles success messaging
|
||||
return
|
||||
}
|
||||
|
||||
// IsAudioFormat checks if the filename suggests an audio format
|
||||
func IsAudioFormat(fileName string) bool {
|
||||
ext := strings.ToLower(filepath.Ext(fileName))
|
||||
audioExts := []string{".wav", ".mp3", ".m4a", ".aac", ".ogg", ".flac"}
|
||||
for _, audioExt := range audioExts {
|
||||
if ext == audioExt {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
@@ -37,12 +37,16 @@ import (
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
)
|
||||
|
||||
// hasAWSCredentials checks if any AWS credentials are present either in the
|
||||
// environment variables or in the default/shared credentials file. It doesn't
|
||||
// attempt to verify the validity of the credentials, but simply ensures that a
|
||||
// potential authentication source exists so we can safely initialize the
|
||||
// Bedrock client without causing the AWS SDK to search for credentials.
|
||||
// hasAWSCredentials checks if Bedrock is properly configured by ensuring both
|
||||
// AWS credentials and BEDROCK_AWS_REGION are present. This prevents the Bedrock
|
||||
// client from being initialized when AWS credentials exist for other purposes.
|
||||
func hasAWSCredentials() bool {
|
||||
// First check if BEDROCK_AWS_REGION is set - this is required for Bedrock
|
||||
if os.Getenv("BEDROCK_AWS_REGION") == "" {
|
||||
return false
|
||||
}
|
||||
|
||||
// Then check if AWS credentials are available
|
||||
if os.Getenv("AWS_PROFILE") != "" ||
|
||||
os.Getenv("AWS_ROLE_SESSION_NAME") != "" ||
|
||||
(os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "") {
|
||||
@@ -284,7 +288,7 @@ func (o *PluginRegistry) Configure() (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
func (o *PluginRegistry) GetChatter(model string, modelContextLength int, strategy string, stream bool, dryRun bool) (ret *Chatter, err error) {
|
||||
func (o *PluginRegistry) GetChatter(model string, modelContextLength int, vendorName string, strategy string, stream bool, dryRun bool) (ret *Chatter, err error) {
|
||||
ret = &Chatter{
|
||||
db: o.Db,
|
||||
Stream: stream,
|
||||
@@ -313,14 +317,32 @@ func (o *PluginRegistry) GetChatter(model string, modelContextLength int, strate
|
||||
ret.model = defaultModel
|
||||
}
|
||||
} else if model == "" {
|
||||
ret.vendor = vendorManager.FindByName(defaultVendor)
|
||||
if vendorName != "" {
|
||||
ret.vendor = vendorManager.FindByName(vendorName)
|
||||
} else {
|
||||
ret.vendor = vendorManager.FindByName(defaultVendor)
|
||||
}
|
||||
ret.model = defaultModel
|
||||
} else {
|
||||
var models *ai.VendorsModels
|
||||
if models, err = vendorManager.GetModels(); err != nil {
|
||||
return
|
||||
}
|
||||
ret.vendor = vendorManager.FindByName(models.FindGroupsByItemFirst(model))
|
||||
if vendorName != "" {
|
||||
// ensure vendor exists and provides model
|
||||
ret.vendor = vendorManager.FindByName(vendorName)
|
||||
availableVendors := models.FindGroupsByItem(model)
|
||||
if ret.vendor == nil || !lo.Contains(availableVendors, vendorName) {
|
||||
err = fmt.Errorf("model %s not available for vendor %s", model, vendorName)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
availableVendors := models.FindGroupsByItem(model)
|
||||
if len(availableVendors) > 1 {
|
||||
fmt.Fprintf(os.Stderr, "Warning: multiple vendors provide model %s: %s. Using %s. Specify --vendor to select a vendor.\n", model, strings.Join(availableVendors, ", "), availableVendors[0])
|
||||
}
|
||||
ret.vendor = vendorManager.FindByName(models.FindGroupsByItemFirst(model))
|
||||
}
|
||||
ret.model = model
|
||||
}
|
||||
|
||||
|
||||
@@ -1,10 +1,19 @@
|
||||
package core
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"io"
|
||||
"os"
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/plugins"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
"github.com/danielmiessler/fabric/internal/tools"
|
||||
)
|
||||
|
||||
func TestSaveEnvFile(t *testing.T) {
|
||||
@@ -19,3 +28,63 @@ func TestSaveEnvFile(t *testing.T) {
|
||||
t.Fatalf("SaveEnvFile() error = %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
// testVendor implements ai.Vendor for testing purposes
|
||||
type testVendor struct {
|
||||
name string
|
||||
models []string
|
||||
}
|
||||
|
||||
func (m *testVendor) GetName() string { return m.name }
|
||||
func (m *testVendor) GetSetupDescription() string { return m.name }
|
||||
func (m *testVendor) IsConfigured() bool { return true }
|
||||
func (m *testVendor) Configure() error { return nil }
|
||||
func (m *testVendor) Setup() error { return nil }
|
||||
func (m *testVendor) SetupFillEnvFileContent(*bytes.Buffer) {}
|
||||
func (m *testVendor) ListModels() ([]string, error) { return m.models, nil }
|
||||
func (m *testVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan string) error {
|
||||
return nil
|
||||
}
|
||||
func (m *testVendor) Send(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error) {
|
||||
return "", nil
|
||||
}
|
||||
func (m *testVendor) NeedsRawMode(string) bool { return false }
|
||||
|
||||
func TestGetChatter_WarnsOnAmbiguousModel(t *testing.T) {
|
||||
tempDir := t.TempDir()
|
||||
db := fsdb.NewDb(tempDir)
|
||||
|
||||
vendorA := &testVendor{name: "VendorA", models: []string{"shared-model"}}
|
||||
vendorB := &testVendor{name: "VendorB", models: []string{"shared-model"}}
|
||||
|
||||
vm := ai.NewVendorsManager()
|
||||
vm.AddVendors(vendorA, vendorB)
|
||||
|
||||
defaults := &tools.Defaults{
|
||||
PluginBase: &plugins.PluginBase{},
|
||||
Vendor: &plugins.Setting{Value: "VendorA"},
|
||||
Model: &plugins.SetupQuestion{Setting: &plugins.Setting{Value: "shared-model"}},
|
||||
ModelContextLength: &plugins.SetupQuestion{Setting: &plugins.Setting{Value: "0"}},
|
||||
}
|
||||
|
||||
registry := &PluginRegistry{Db: db, VendorManager: vm, Defaults: defaults}
|
||||
|
||||
r, w, _ := os.Pipe()
|
||||
oldStderr := os.Stderr
|
||||
os.Stderr = w
|
||||
defer func() { os.Stderr = oldStderr }()
|
||||
|
||||
chatter, err := registry.GetChatter("shared-model", 0, "", "", false, false)
|
||||
w.Close()
|
||||
warning, _ := io.ReadAll(r)
|
||||
|
||||
if err != nil {
|
||||
t.Fatalf("GetChatter() error = %v", err)
|
||||
}
|
||||
if chatter.vendor.GetName() != "VendorA" {
|
||||
t.Fatalf("expected vendor VendorA, got %s", chatter.vendor.GetName())
|
||||
}
|
||||
if !strings.Contains(string(warning), "multiple vendors provide model shared-model") {
|
||||
t.Fatalf("expected warning about multiple vendors, got %q", string(warning))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -4,6 +4,14 @@ import "github.com/danielmiessler/fabric/internal/chat"
|
||||
|
||||
const ChatMessageRoleMeta = "meta"
|
||||
|
||||
// Default values for chat options (must match cli/flags.go defaults)
|
||||
const (
|
||||
DefaultTemperature = 0.7
|
||||
DefaultTopP = 0.9
|
||||
DefaultPresencePenalty = 0.0
|
||||
DefaultFrequencyPenalty = 0.0
|
||||
)
|
||||
|
||||
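A small sketch (illustrative values, not part of the change) of why these defaults live in the domain package: an option that still equals the shared default can be treated as "not explicitly set by the user", which is how the Anthropic client later in this diff chooses between Temperature and TopP.

```go
package main

import (
	"fmt"

	"github.com/danielmiessler/fabric/internal/domain"
)

func main() {
	opts := domain.ChatOptions{Temperature: domain.DefaultTemperature, TopP: 0.5}
	// TopP differs from the shared default, so treat it as user-chosen and
	// forward only that knob; otherwise fall back to Fabric's temperature.
	if opts.TopP != domain.DefaultTopP {
		fmt.Println("send TopP:", opts.TopP)
	} else {
		fmt.Println("send Temperature:", opts.Temperature)
	}
}
```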
type ChatRequest struct {
|
||||
ContextName string
|
||||
SessionName string
|
||||
@@ -17,25 +25,31 @@ type ChatRequest struct {
|
||||
}
|
||||
|
||||
type ChatOptions struct {
|
||||
Model string
|
||||
Temperature float64
|
||||
TopP float64
|
||||
PresencePenalty float64
|
||||
FrequencyPenalty float64
|
||||
Raw bool
|
||||
Seed int
|
||||
ModelContextLength int
|
||||
MaxTokens int
|
||||
Search bool
|
||||
SearchLocation string
|
||||
ImageFile string
|
||||
ImageSize string
|
||||
ImageQuality string
|
||||
ImageCompression int
|
||||
ImageBackground string
|
||||
SuppressThink bool
|
||||
ThinkStartTag string
|
||||
ThinkEndTag string
|
||||
Model string
|
||||
Temperature float64
|
||||
TopP float64
|
||||
PresencePenalty float64
|
||||
FrequencyPenalty float64
|
||||
Raw bool
|
||||
Seed int
|
||||
Thinking ThinkingLevel
|
||||
ModelContextLength int
|
||||
MaxTokens int
|
||||
Search bool
|
||||
SearchLocation string
|
||||
ImageFile string
|
||||
ImageSize string
|
||||
ImageQuality string
|
||||
ImageCompression int
|
||||
ImageBackground string
|
||||
SuppressThink bool
|
||||
ThinkStartTag string
|
||||
ThinkEndTag string
|
||||
AudioOutput bool
|
||||
AudioFormat string
|
||||
Voice string
|
||||
Notification bool
|
||||
NotificationCommand string
|
||||
}
|
||||
|
||||
// NormalizeMessages remove empty messages and ensure messages order user-assist-user
|
||||
|
||||
internal/domain/thinking.go (new file)
@@ -0,0 +1,34 @@
|
||||
package domain
|
||||
|
||||
// ThinkingLevel represents reasoning/thinking levels supported across providers.
|
||||
type ThinkingLevel string
|
||||
|
||||
const (
|
||||
ThinkingOff ThinkingLevel = "off"
|
||||
ThinkingLow ThinkingLevel = "low"
|
||||
ThinkingMedium ThinkingLevel = "medium"
|
||||
ThinkingHigh ThinkingLevel = "high"
|
||||
)
|
||||
|
||||
// Token budget constants for each ThinkingLevel. Each level maps to the maximum
// number of tokens that can be spent on reasoning (1024 for low, 2048 for
// medium, 4096 for high).
|
||||
// These values are chosen to align with typical context window sizes for LLMs at different reasoning levels.
|
||||
// Adjust these if model capabilities change.
|
||||
const (
|
||||
// TokenBudgetLow is suitable for basic reasoning or smaller models (e.g., 1k context window).
|
||||
TokenBudgetLow int64 = 1024
|
||||
// TokenBudgetMedium is suitable for intermediate reasoning or mid-sized models (e.g., 2k context window).
|
||||
TokenBudgetMedium int64 = 2048
|
||||
// TokenBudgetHigh is suitable for advanced reasoning or large models (e.g., 4k context window).
|
||||
TokenBudgetHigh int64 = 4096
|
||||
)
|
||||
|
||||
// ThinkingBudgets defines standardized token budgets for reasoning-enabled models.
|
||||
var ThinkingBudgets = map[ThinkingLevel]int64{
|
||||
ThinkingLow: TokenBudgetLow,
|
||||
ThinkingMedium: TokenBudgetMedium,
|
||||
ThinkingHigh: TokenBudgetHigh,
|
||||
}
|
||||
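A small usage sketch (the surrounding wiring is assumed): named levels resolve through ThinkingBudgets, while "off" and bare numeric values are handled per provider (see parseThinking in the Anthropic client later in this diff).

```go
package main

import (
	"fmt"

	"github.com/danielmiessler/fabric/internal/domain"
)

func main() {
	for _, lvl := range []domain.ThinkingLevel{domain.ThinkingLow, domain.ThinkingMedium, domain.ThinkingHigh} {
		// Prints 1024, 2048 and 4096 reasoning tokens respectively.
		fmt.Printf("%s -> %d reasoning tokens\n", lvl, domain.ThinkingBudgets[lvl])
	}
}
```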
@@ -4,6 +4,8 @@ import (
|
||||
"context"
|
||||
"fmt"
|
||||
"net/http"
|
||||
"os"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/anthropics/anthropic-sdk-go"
|
||||
@@ -46,6 +48,11 @@ func NewClient() (ret *Client) {
|
||||
string(anthropic.ModelClaude_3_5_Sonnet_20240620), string(anthropic.ModelClaude3OpusLatest),
|
||||
string(anthropic.ModelClaude_3_Opus_20240229), string(anthropic.ModelClaude_3_Haiku_20240307),
|
||||
string(anthropic.ModelClaudeOpus4_20250514), string(anthropic.ModelClaudeSonnet4_20250514),
|
||||
string(anthropic.ModelClaudeOpus4_1_20250805),
|
||||
}
|
||||
|
||||
ret.modelBetas = map[string][]string{
|
||||
string(anthropic.ModelClaudeSonnet4_20250514): {"context-1m-2025-08-07"},
|
||||
}
|
||||
|
||||
return
|
||||
@@ -92,6 +99,7 @@ type Client struct {
|
||||
maxTokens int
|
||||
defaultRequiredUserMessage string
|
||||
models []string
|
||||
modelBetas map[string][]string
|
||||
|
||||
client anthropic.Client
|
||||
}
|
||||
@@ -147,6 +155,26 @@ func (an *Client) ListModels() (ret []string, err error) {
|
||||
return an.models, nil
|
||||
}
|
||||
|
||||
func parseThinking(level domain.ThinkingLevel) (anthropic.ThinkingConfigParamUnion, bool) {
|
||||
lower := strings.ToLower(string(level))
|
||||
switch domain.ThinkingLevel(lower) {
|
||||
case domain.ThinkingOff:
|
||||
disabled := anthropic.NewThinkingConfigDisabledParam()
|
||||
return anthropic.ThinkingConfigParamUnion{OfDisabled: &disabled}, true
|
||||
case domain.ThinkingLow, domain.ThinkingMedium, domain.ThinkingHigh:
|
||||
if budget, ok := domain.ThinkingBudgets[domain.ThinkingLevel(lower)]; ok {
|
||||
return anthropic.ThinkingConfigParamOfEnabled(budget), true
|
||||
}
|
||||
default:
|
||||
if tokens, err := strconv.ParseInt(lower, 10, 64); err == nil {
|
||||
if tokens >= 1 && tokens <= 10000 {
|
||||
return anthropic.ThinkingConfigParamOfEnabled(tokens), true
|
||||
}
|
||||
}
|
||||
}
|
||||
return anthropic.ThinkingConfigParamUnion{}, false
|
||||
}
|
||||
|
||||
func (an *Client) SendStream(
|
||||
msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string,
|
||||
) (err error) {
|
||||
@@ -159,7 +187,17 @@ func (an *Client) SendStream(
|
||||
|
||||
ctx := context.Background()
|
||||
|
||||
stream := an.client.Messages.NewStreaming(ctx, an.buildMessageParams(messages, opts))
|
||||
params := an.buildMessageParams(messages, opts)
|
||||
betas := an.modelBetas[opts.Model]
|
||||
var reqOpts []option.RequestOption
|
||||
if len(betas) > 0 {
|
||||
reqOpts = append(reqOpts, option.WithHeader("anthropic-beta", strings.Join(betas, ",")))
|
||||
}
|
||||
stream := an.client.Messages.NewStreaming(ctx, params, reqOpts...)
|
||||
if stream.Err() != nil && len(betas) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), stream.Err())
|
||||
stream = an.client.Messages.NewStreaming(ctx, params)
|
||||
}
|
||||
|
||||
for stream.Next() {
|
||||
event := stream.Current()
|
||||
@@ -181,11 +219,19 @@ func (an *Client) buildMessageParams(msgs []anthropic.MessageParam, opts *domain
|
||||
params anthropic.MessageNewParams) {
|
||||
|
||||
params = anthropic.MessageNewParams{
|
||||
Model: anthropic.Model(opts.Model),
|
||||
MaxTokens: int64(an.maxTokens),
|
||||
TopP: anthropic.Opt(opts.TopP),
|
||||
Temperature: anthropic.Opt(opts.Temperature),
|
||||
Messages: msgs,
|
||||
Model: anthropic.Model(opts.Model),
|
||||
MaxTokens: int64(an.maxTokens),
|
||||
Messages: msgs,
|
||||
}
|
||||
|
||||
// Only set one of Temperature or TopP as some models don't allow both
|
||||
// Always set temperature to ensure consistent behavior (Anthropic default is 1.0, Fabric default is 0.7)
|
||||
if opts.TopP != domain.DefaultTopP {
|
||||
// User explicitly set TopP, so use that instead of temperature
|
||||
params.TopP = anthropic.Opt(opts.TopP)
|
||||
} else {
|
||||
// Use temperature (always set to ensure Fabric's default of 0.7, not Anthropic's 1.0)
|
||||
params.Temperature = anthropic.Opt(opts.Temperature)
|
||||
}
|
||||
|
||||
// Add Claude Code spoofing system message for OAuth authentication
|
||||
@@ -217,6 +263,11 @@ func (an *Client) buildMessageParams(msgs []anthropic.MessageParam, opts *domain
|
||||
{OfWebSearchTool20250305: &webTool},
|
||||
}
|
||||
}
|
||||
|
||||
if t, ok := parseThinking(opts.Thinking); ok {
|
||||
params.Thinking = t
|
||||
}
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -230,8 +281,21 @@ func (an *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage,
|
||||
}
|
||||
|
||||
var message *anthropic.Message
|
||||
if message, err = an.client.Messages.New(ctx, an.buildMessageParams(messages, opts)); err != nil {
|
||||
return
|
||||
params := an.buildMessageParams(messages, opts)
|
||||
betas := an.modelBetas[opts.Model]
|
||||
var reqOpts []option.RequestOption
|
||||
if len(betas) > 0 {
|
||||
reqOpts = append(reqOpts, option.WithHeader("anthropic-beta", strings.Join(betas, ",")))
|
||||
}
|
||||
if message, err = an.client.Messages.New(ctx, params, reqOpts...); err != nil {
|
||||
if len(betas) > 0 {
|
||||
fmt.Fprintf(os.Stderr, "Anthropic beta feature %s failed: %v\n", strings.Join(betas, ","), err)
|
||||
if message, err = an.client.Messages.New(ctx, params); err != nil {
|
||||
return
|
||||
}
|
||||
} else {
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
var textParts []string
|
||||
|
||||
@@ -72,7 +72,8 @@ func TestBuildMessageParams_WithoutSearch(t *testing.T) {
|
||||
client := NewClient()
|
||||
opts := &domain.ChatOptions{
|
||||
Model: "claude-3-5-sonnet-latest",
|
||||
Temperature: 0.7,
|
||||
Temperature: 0.8, // Use non-default value to ensure it gets set
|
||||
TopP: domain.DefaultTopP, // Use default TopP so temperature takes precedence
|
||||
Search: false,
|
||||
}
|
||||
|
||||
@@ -90,6 +91,7 @@ func TestBuildMessageParams_WithoutSearch(t *testing.T) {
|
||||
t.Errorf("Expected model %s, got %s", opts.Model, params.Model)
|
||||
}
|
||||
|
||||
// When using non-default temperature, it should be set in params
|
||||
if params.Temperature.Value != opts.Temperature {
|
||||
t.Errorf("Expected temperature %f, got %f", opts.Temperature, params.Temperature.Value)
|
||||
}
|
||||
@@ -99,7 +101,8 @@ func TestBuildMessageParams_WithSearch(t *testing.T) {
|
||||
client := NewClient()
|
||||
opts := &domain.ChatOptions{
|
||||
Model: "claude-3-5-sonnet-latest",
|
||||
Temperature: 0.7,
|
||||
Temperature: 0.8, // Use non-default value
|
||||
TopP: domain.DefaultTopP, // Use default TopP so temperature takes precedence
|
||||
Search: true,
|
||||
}
|
||||
|
||||
@@ -135,7 +138,8 @@ func TestBuildMessageParams_WithSearchAndLocation(t *testing.T) {
|
||||
client := NewClient()
|
||||
opts := &domain.ChatOptions{
|
||||
Model: "claude-3-5-sonnet-latest",
|
||||
Temperature: 0.7,
|
||||
Temperature: 0.8, // Use non-default value
|
||||
TopP: domain.DefaultTopP, // Use default TopP so temperature takes precedence
|
||||
Search: true,
|
||||
SearchLocation: "America/Los_Angeles",
|
||||
}
|
||||
@@ -164,6 +168,15 @@ func TestBuildMessageParams_WithSearchAndLocation(t *testing.T) {
|
||||
}
|
||||
}
|
||||
|
||||
func TestModelBetasConfiguration(t *testing.T) {
|
||||
client := NewClient()
|
||||
model := string(anthropic.ModelClaudeSonnet4_20250514)
|
||||
betas, ok := client.modelBetas[model]
|
||||
if !ok || len(betas) != 1 || betas[0] != "context-1m-2025-08-07" {
|
||||
t.Errorf("expected beta mapping for %s", model)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCitationFormatting(t *testing.T) {
|
||||
// Test the citation formatting logic by creating a mock message with citations
|
||||
message := &anthropic.Message{
|
||||
@@ -256,3 +269,59 @@ func TestCitationFormatting(t *testing.T) {
|
||||
t.Errorf("Expected 2 unique citations, got %d", citationCount)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMessageParams_DefaultValues(t *testing.T) {
|
||||
client := NewClient()
|
||||
|
||||
// Test with default temperature - should always set temperature unless TopP is explicitly set
|
||||
opts := &domain.ChatOptions{
|
||||
Model: "claude-3-5-sonnet-latest",
|
||||
Temperature: domain.DefaultTemperature, // 0.7 - should be set to override Anthropic's 1.0 default
|
||||
TopP: domain.DefaultTopP, // 0.9 - default, so temperature takes precedence
|
||||
Search: false,
|
||||
}
|
||||
|
||||
messages := []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello")),
|
||||
}
|
||||
|
||||
params := client.buildMessageParams(messages, opts)
|
||||
|
||||
// Temperature should be set when using default value to override Anthropic's 1.0 default
|
||||
if params.Temperature.Value != opts.Temperature {
|
||||
t.Errorf("Expected temperature %f, got %f", opts.Temperature, params.Temperature.Value)
|
||||
}
|
||||
|
||||
// TopP should not be set when using default value (temperature takes precedence)
|
||||
if params.TopP.Value != 0 {
|
||||
t.Errorf("Expected TopP to not be set (0), but got %f", params.TopP.Value)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildMessageParams_ExplicitTopP(t *testing.T) {
|
||||
client := NewClient()
|
||||
|
||||
// Test with explicit TopP - should set TopP instead of temperature
|
||||
opts := &domain.ChatOptions{
|
||||
Model: "claude-3-5-sonnet-latest",
|
||||
Temperature: domain.DefaultTemperature, // 0.7 - ignored when TopP is explicitly set
|
||||
TopP: 0.5, // Non-default - should be set
|
||||
Search: false,
|
||||
}
|
||||
|
||||
messages := []anthropic.MessageParam{
|
||||
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello")),
|
||||
}
|
||||
|
||||
params := client.buildMessageParams(messages, opts)
|
||||
|
||||
// Temperature should not be set when TopP is explicitly set
|
||||
if params.Temperature.Value != 0 {
|
||||
t.Errorf("Expected temperature to not be set (0), but got %f", params.Temperature.Value)
|
||||
}
|
||||
|
||||
// TopP should be set when using non-default value
|
||||
if params.TopP.Value != opts.TopP {
|
||||
t.Errorf("Expected TopP %f, got %f", opts.TopP, params.TopP.Value)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -46,8 +46,13 @@ func (t *OAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
// Add OAuth Bearer token
|
||||
newReq.Header.Set("Authorization", "Bearer "+token)
|
||||
|
||||
// Add the anthropic-beta header for OAuth
|
||||
newReq.Header.Set("anthropic-beta", "oauth-2025-04-20")
|
||||
// Add the anthropic-beta header for OAuth, preserving existing betas
|
||||
existing := newReq.Header.Get("anthropic-beta")
|
||||
beta := "oauth-2025-04-20"
|
||||
if existing != "" {
|
||||
beta = existing + "," + beta
|
||||
}
|
||||
newReq.Header.Set("anthropic-beta", beta)
|
||||
|
||||
// Set User-Agent to match AI SDK exactly
|
||||
newReq.Header.Set("User-Agent", "ai-sdk/anthropic")
|
||||
|
||||
@@ -153,7 +153,7 @@ func (c *BedrockClient) ListModels() ([]string, error) {
|
||||
return models, nil
|
||||
}
|
||||
|
||||
// SendStream sends the messages to the the Bedrock ConverseStream API
|
||||
// SendStream sends the messages to the Bedrock ConverseStream API
|
||||
func (c *BedrockClient) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
|
||||
// Ensure channel is closed on all exit paths to prevent goroutine leaks
|
||||
defer func() {
|
||||
|
||||
@@ -87,6 +87,9 @@ func (c *Client) formatOptions(opts *domain.ChatOptions) string {
|
||||
if opts.ImageFile != "" {
|
||||
builder.WriteString(fmt.Sprintf("ImageFile: %s\n", opts.ImageFile))
|
||||
}
|
||||
if opts.Thinking != "" {
|
||||
builder.WriteString(fmt.Sprintf("Thinking: %s\n", string(opts.Thinking)))
|
||||
}
|
||||
if opts.SuppressThink {
|
||||
builder.WriteString("SuppressThink: enabled\n")
|
||||
builder.WriteString(fmt.Sprintf("Thinking Start Tag: %s\n", opts.ThinkStartTag))
|
||||
|
||||
@@ -13,6 +13,7 @@ func NewClient() (ret *Client) {
|
||||
ret = &Client{}
|
||||
ret.Client = openai.NewClientCompatibleNoSetupQuestions("Exolab", ret.configure)
|
||||
|
||||
ret.ApiKey = ret.AddSetupQuestion("API Key", false)
|
||||
ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", true)
|
||||
ret.ApiBaseURL.Value = "http://localhost:52415"
|
||||
|
||||
|
||||
@@ -1,21 +1,50 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"errors"
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"regexp"
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/plugins"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/google/generative-ai-go/genai"
|
||||
"google.golang.org/api/iterator"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/genai"
|
||||
)
|
||||
|
||||
const modelsNamePrefix = "models/"
|
||||
// WAV audio constants
|
||||
const (
|
||||
DefaultChannels = 1
|
||||
DefaultSampleRate = 24000
|
||||
DefaultBitsPerSample = 16
|
||||
WAVHeaderSize = 44
|
||||
RIFFHeaderSize = 36
|
||||
MaxAudioDataSize = 100 * 1024 * 1024 // 100MB limit for security
|
||||
MinAudioDataSize = 44 // Minimum viable audio data
|
||||
AudioDataPrefix = "FABRIC_AUDIO_DATA:"
|
||||
)
|
||||
|
||||
const (
|
||||
citationHeader = "\n\n## Sources\n\n"
|
||||
citationSeparator = "\n"
|
||||
citationFormat = "- [%s](%s)"
|
||||
|
||||
errInvalidLocationFormat = "invalid search location format %q: must be timezone (e.g., 'America/Los_Angeles') or language code (e.g., 'en-US')"
|
||||
locationSeparator = "/"
|
||||
langCodeSeparator = "_"
|
||||
langCodeNormalizedSep = "-"
|
||||
|
||||
modelPrefix = "models/"
|
||||
modelTypeTTS = "tts"
|
||||
modelTypePreviewTTS = "preview-tts"
|
||||
modelTypeTextToSpeech = "text-to-speech"
|
||||
)
|
||||
|
||||
var langCodeRegex = regexp.MustCompile(`^[a-z]{2}(-[A-Z]{2})?$`)
|
||||
|
||||
func NewClient() (ret *Client) {
|
||||
vendorName := "Gemini"
|
||||
@@ -39,107 +68,102 @@ type Client struct {
|
||||
func (o *Client) ListModels() (ret []string, err error) {
|
||||
ctx := context.Background()
|
||||
var client *genai.Client
|
||||
if client, err = genai.NewClient(ctx, option.WithAPIKey(o.ApiKey.Value)); err != nil {
|
||||
if client, err = genai.NewClient(ctx, &genai.ClientConfig{
|
||||
APIKey: o.ApiKey.Value,
|
||||
Backend: genai.BackendGeminiAPI,
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
iter := client.ListModels(ctx)
|
||||
for {
|
||||
var resp *genai.ModelInfo
|
||||
if resp, err = iter.Next(); err != nil {
|
||||
if errors.Is(err, iterator.Done) {
|
||||
err = nil
|
||||
}
|
||||
break
|
||||
}
|
||||
// List available models using the correct API
|
||||
resp, err := client.Models.List(ctx, &genai.ListModelsConfig{})
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
||||
name := o.buildModelNameSimple(resp.Name)
|
||||
ret = append(ret, name)
|
||||
for _, model := range resp.Items {
|
||||
// Strip the "models/" prefix for user convenience
|
||||
modelName := strings.TrimPrefix(model.Name, "models/")
|
||||
ret = append(ret, modelName)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (ret string, err error) {
|
||||
systemInstruction, messages := toMessages(msgs)
|
||||
// Check if this is a TTS model request
|
||||
if o.isTTSModel(opts.Model) {
|
||||
if !opts.AudioOutput {
|
||||
err = fmt.Errorf("TTS model '%s' requires audio output. Please specify an audio output file with -o flag ending in .wav", opts.Model)
|
||||
return
|
||||
}
|
||||
|
||||
// Handle TTS generation
|
||||
return o.generateTTSAudio(ctx, msgs, opts)
|
||||
}
|
||||
|
||||
// Regular text generation
|
||||
var client *genai.Client
|
||||
if client, err = genai.NewClient(ctx, option.WithAPIKey(o.ApiKey.Value)); err != nil {
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
model := client.GenerativeModel(o.buildModelNameFull(opts.Model))
|
||||
model.SetTemperature(float32(opts.Temperature))
|
||||
model.SetTopP(float32(opts.TopP))
|
||||
model.SystemInstruction = systemInstruction
|
||||
|
||||
var response *genai.GenerateContentResponse
|
||||
if response, err = model.GenerateContent(ctx, messages...); err != nil {
|
||||
if client, err = genai.NewClient(ctx, &genai.ClientConfig{
|
||||
APIKey: o.ApiKey.Value,
|
||||
Backend: genai.BackendGeminiAPI,
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
ret = o.extractText(response)
|
||||
// Convert messages to new SDK format
|
||||
contents := o.convertMessages(msgs)
|
||||
|
||||
cfg, err := o.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Generate content with optional tools
|
||||
response, err := client.Models.GenerateContent(ctx, o.buildModelNameFull(opts.Model), contents, cfg)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Extract text from response
|
||||
ret = o.extractTextFromResponse(response)
|
||||
return
|
||||
}
|
||||
|
||||
func (o *Client) buildModelNameSimple(fullModelName string) string {
|
||||
return strings.TrimPrefix(fullModelName, modelsNamePrefix)
|
||||
}
|
||||
|
||||
func (o *Client) buildModelNameFull(modelName string) string {
|
||||
return fmt.Sprintf("%v%v", modelsNamePrefix, modelName)
|
||||
}
|
||||
|
||||
func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
|
||||
ctx := context.Background()
|
||||
var client *genai.Client
|
||||
if client, err = genai.NewClient(ctx, option.WithAPIKey(o.ApiKey.Value)); err != nil {
|
||||
if client, err = genai.NewClient(ctx, &genai.ClientConfig{
|
||||
APIKey: o.ApiKey.Value,
|
||||
Backend: genai.BackendGeminiAPI,
|
||||
}); err != nil {
|
||||
return
|
||||
}
|
||||
defer client.Close()
|
||||
|
||||
systemInstruction, messages := toMessages(msgs)
|
||||
// Convert messages to new SDK format
|
||||
contents := o.convertMessages(msgs)
|
||||
|
||||
model := client.GenerativeModel(o.buildModelNameFull(opts.Model))
|
||||
model.SetTemperature(float32(opts.Temperature))
|
||||
model.SetTopP(float32(opts.TopP))
|
||||
model.SystemInstruction = systemInstruction
|
||||
cfg, err := o.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
||||
iter := model.GenerateContentStream(ctx, messages...)
|
||||
for {
|
||||
if resp, iterErr := iter.Next(); iterErr == nil {
|
||||
for _, candidate := range resp.Candidates {
|
||||
if candidate.Content != nil {
|
||||
for _, part := range candidate.Content.Parts {
|
||||
if text, ok := part.(genai.Text); ok {
|
||||
channel <- string(text)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
if !errors.Is(iterErr, iterator.Done) {
|
||||
channel <- fmt.Sprintf("%v\n", iterErr)
|
||||
}
|
||||
// Generate streaming content with optional tools
|
||||
stream := client.Models.GenerateContentStream(ctx, o.buildModelNameFull(opts.Model), contents, cfg)
|
||||
|
||||
for response, err := range stream {
|
||||
if err != nil {
|
||||
channel <- fmt.Sprintf("Error: %v\n", err)
|
||||
close(channel)
|
||||
break
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (o *Client) extractText(response *genai.GenerateContentResponse) (ret string) {
|
||||
for _, candidate := range response.Candidates {
|
||||
if candidate.Content == nil {
|
||||
break
|
||||
}
|
||||
for _, part := range candidate.Content.Parts {
|
||||
if text, ok := part.(genai.Text); ok {
|
||||
ret += string(text)
|
||||
}
|
||||
text := o.extractTextFromResponse(response)
|
||||
if text != "" {
|
||||
channel <- text
|
||||
}
|
||||
}
|
||||
close(channel)
|
||||
|
||||
return
|
||||
}
|
||||
|
||||
@@ -147,18 +171,377 @@ func (o *Client) NeedsRawMode(modelName string) bool {
|
||||
return false
|
||||
}
|
||||
|
||||
func toMessages(msgs []*chat.ChatCompletionMessage) (systemInstruction *genai.Content, messages []genai.Part) {
|
||||
if len(msgs) >= 2 {
|
||||
systemInstruction = &genai.Content{
|
||||
Parts: []genai.Part{
|
||||
genai.Text(msgs[0].Content),
|
||||
},
|
||||
func parseThinkingConfig(level domain.ThinkingLevel) (*genai.ThinkingConfig, bool) {
|
||||
lower := strings.ToLower(strings.TrimSpace(string(level)))
|
||||
switch domain.ThinkingLevel(lower) {
|
||||
case "", domain.ThinkingOff:
|
||||
return nil, false
|
||||
case domain.ThinkingLow, domain.ThinkingMedium, domain.ThinkingHigh:
|
||||
if budget, ok := domain.ThinkingBudgets[domain.ThinkingLevel(lower)]; ok {
|
||||
b := int32(budget)
|
||||
return &genai.ThinkingConfig{IncludeThoughts: true, ThinkingBudget: &b}, true
|
||||
}
|
||||
for _, msg := range msgs[1:] {
|
||||
messages = append(messages, genai.Text(msg.Content))
|
||||
default:
|
||||
if tokens, err := strconv.ParseInt(lower, 10, 32); err == nil && tokens > 0 {
|
||||
t := int32(tokens)
|
||||
return &genai.ThinkingConfig{IncludeThoughts: true, ThinkingBudget: &t}, true
|
||||
}
|
||||
} else {
|
||||
messages = append(messages, genai.Text(msgs[0].Content))
|
||||
}
|
||||
return
|
||||
return nil, false
|
||||
}
|
||||
|
||||
// buildGenerateContentConfig constructs the generation config with optional tools.
|
||||
// When search is enabled it injects the Google Search tool. The optional search
|
||||
// location accepts either:
|
||||
// - A timezone in the format "Continent/City" (e.g., "America/Los_Angeles")
|
||||
// - An ISO language code "ll" or "ll-CC" (e.g., "en" or "en-US")
|
||||
//
|
||||
// Underscores are normalized to hyphens. Returns an error if the location is
|
||||
// invalid.
|
||||
func (o *Client) buildGenerateContentConfig(opts *domain.ChatOptions) (*genai.GenerateContentConfig, error) {
|
||||
temperature := float32(opts.Temperature)
|
||||
topP := float32(opts.TopP)
|
||||
cfg := &genai.GenerateContentConfig{
|
||||
Temperature: &temperature,
|
||||
TopP: &topP,
|
||||
MaxOutputTokens: int32(opts.ModelContextLength),
|
||||
}
|
||||
|
||||
if opts.Search {
|
||||
cfg.Tools = []*genai.Tool{{GoogleSearch: &genai.GoogleSearch{}}}
|
||||
if loc := opts.SearchLocation; loc != "" {
|
||||
if isValidLocationFormat(loc) {
|
||||
loc = normalizeLocation(loc)
|
||||
cfg.ToolConfig = &genai.ToolConfig{
|
||||
RetrievalConfig: &genai.RetrievalConfig{LanguageCode: loc},
|
||||
}
|
||||
} else {
|
||||
return nil, fmt.Errorf(errInvalidLocationFormat, loc)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
if tc, ok := parseThinkingConfig(opts.Thinking); ok {
|
||||
cfg.ThinkingConfig = tc
|
||||
}
|
||||
|
||||
return cfg, nil
|
||||
}
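
The accepted search-location shapes and their normalization can be shown with a self-contained sketch of the same rules (the helpers are re-declared locally rather than imported):

package main

import (
	"fmt"
	"regexp"
	"strings"
)

var langCodeRegex = regexp.MustCompile(`^[a-z]{2}(-[A-Z]{2})?$`)

// normalize mirrors normalizeLocation: timezones pass through unchanged,
// language codes have "_" replaced with "-" (en_US -> en-US).
func normalize(loc string) string {
	if strings.Contains(loc, "/") {
		return loc
	}
	return strings.Replace(loc, "_", "-", 1)
}

// valid mirrors isValidLocationFormat: either a two-part timezone or a
// language code matching ll or ll-CC after normalization.
func valid(loc string) bool {
	if strings.Contains(loc, "/") {
		parts := strings.Split(loc, "/")
		return len(parts) == 2 && parts[0] != "" && parts[1] != ""
	}
	return langCodeRegex.MatchString(normalize(loc))
}

func main() {
	for _, loc := range []string{"America/Los_Angeles", "en_US", "en", "invalid"} {
		fmt.Printf("%-20s valid=%t normalized=%s\n", loc, valid(loc), normalize(loc))
	}
}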
|
||||
|
||||
// buildModelNameFull adds the "models/" prefix for API calls
|
||||
func (o *Client) buildModelNameFull(modelName string) string {
|
||||
if strings.HasPrefix(modelName, modelPrefix) {
|
||||
return modelName
|
||||
}
|
||||
return modelPrefix + modelName
|
||||
}
|
||||
|
||||
func isValidLocationFormat(location string) bool {
|
||||
if strings.Contains(location, locationSeparator) {
|
||||
parts := strings.Split(location, locationSeparator)
|
||||
return len(parts) == 2 && parts[0] != "" && parts[1] != ""
|
||||
}
|
||||
return isValidLanguageCode(location)
|
||||
}
|
||||
|
||||
func normalizeLocation(location string) string {
|
||||
if strings.Contains(location, locationSeparator) {
|
||||
return location
|
||||
}
|
||||
return strings.Replace(location, langCodeSeparator, langCodeNormalizedSep, 1)
|
||||
}
|
||||
|
||||
// isValidLanguageCode reports whether the input is an ISO 639-1 language code
|
||||
// optionally followed by an ISO 3166-1 country code. Underscores are
|
||||
// normalized to hyphens before validation.
|
||||
func isValidLanguageCode(code string) bool {
|
||||
normalized := strings.Replace(code, langCodeSeparator, langCodeNormalizedSep, 1)
|
||||
parts := strings.Split(normalized, langCodeNormalizedSep)
|
||||
switch len(parts) {
|
||||
case 1:
|
||||
return langCodeRegex.MatchString(strings.ToLower(parts[0]))
|
||||
case 2:
|
||||
formatted := strings.ToLower(parts[0]) + langCodeNormalizedSep + strings.ToUpper(parts[1])
|
||||
return langCodeRegex.MatchString(formatted)
|
||||
default:
|
||||
return false
|
||||
}
|
||||
}
|
||||
|
||||
// isTTSModel checks if the model is a text-to-speech model
|
||||
func (o *Client) isTTSModel(modelName string) bool {
|
||||
lowerModel := strings.ToLower(modelName)
|
||||
return strings.Contains(lowerModel, modelTypeTTS) ||
|
||||
strings.Contains(lowerModel, modelTypePreviewTTS) ||
|
||||
strings.Contains(lowerModel, modelTypeTextToSpeech)
|
||||
}
|
||||
|
||||
// extractTextForTTS extracts text content from chat messages for TTS generation
|
||||
func (o *Client) extractTextForTTS(msgs []*chat.ChatCompletionMessage) (string, error) {
|
||||
for i := len(msgs) - 1; i >= 0; i-- {
|
||||
if msgs[i].Role == chat.ChatMessageRoleUser && msgs[i].Content != "" {
|
||||
return msgs[i].Content, nil
|
||||
}
|
||||
}
|
||||
return "", fmt.Errorf("no text content found for TTS generation")
|
||||
}
|
||||
|
||||
// createGenaiClient creates a new GenAI client for TTS operations
|
||||
func (o *Client) createGenaiClient(ctx context.Context) (*genai.Client, error) {
|
||||
return genai.NewClient(ctx, &genai.ClientConfig{
|
||||
APIKey: o.ApiKey.Value,
|
||||
Backend: genai.BackendGeminiAPI,
|
||||
})
|
||||
}
|
||||
|
||||
// generateTTSAudio handles TTS audio generation using the new SDK
|
||||
func (o *Client) generateTTSAudio(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (ret string, err error) {
|
||||
textToSpeak, err := o.extractTextForTTS(msgs)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
// Validate voice name before making API call
|
||||
if opts.Voice != "" && !IsValidGeminiVoice(opts.Voice) {
|
||||
validVoices := GetGeminiVoiceNames()
|
||||
return "", fmt.Errorf("invalid voice '%s'. Valid voices are: %v", opts.Voice, validVoices)
|
||||
}
|
||||
|
||||
client, err := o.createGenaiClient(ctx)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return o.performTTSGeneration(ctx, client, textToSpeak, opts)
|
||||
}
|
||||
|
||||
// performTTSGeneration performs the actual TTS generation and audio processing
|
||||
func (o *Client) performTTSGeneration(ctx context.Context, client *genai.Client, textToSpeak string, opts *domain.ChatOptions) (string, error) {
|
||||
|
||||
// Create content for TTS
|
||||
contents := []*genai.Content{{
|
||||
Parts: []*genai.Part{{Text: textToSpeak}},
|
||||
}}
|
||||
|
||||
// Configure for TTS generation
|
||||
voiceName := opts.Voice
|
||||
if voiceName == "" {
|
||||
voiceName = "Kore" // Default voice if none specified
|
||||
}
|
||||
|
||||
config := &genai.GenerateContentConfig{
|
||||
ResponseModalities: []string{"AUDIO"},
|
||||
SpeechConfig: &genai.SpeechConfig{
|
||||
VoiceConfig: &genai.VoiceConfig{
|
||||
PrebuiltVoiceConfig: &genai.PrebuiltVoiceConfig{
|
||||
VoiceName: voiceName,
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
// Generate TTS content
|
||||
response, err := client.Models.GenerateContent(ctx, o.buildModelNameFull(opts.Model), contents, config)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("TTS generation failed: %w", err)
|
||||
}
|
||||
|
||||
// Extract and process audio data
|
||||
if len(response.Candidates) > 0 && response.Candidates[0].Content != nil && len(response.Candidates[0].Content.Parts) > 0 {
|
||||
part := response.Candidates[0].Content.Parts[0]
|
||||
if part.InlineData != nil && len(part.InlineData.Data) > 0 {
|
||||
// Validate audio data format and size
|
||||
if part.InlineData.MIMEType != "" && !strings.HasPrefix(part.InlineData.MIMEType, "audio/") {
|
||||
return "", fmt.Errorf("unexpected data type: %s, expected audio data", part.InlineData.MIMEType)
|
||||
}
|
||||
|
||||
pcmData := part.InlineData.Data
|
||||
if len(pcmData) < MinAudioDataSize {
|
||||
return "", fmt.Errorf("audio data too small: %d bytes, minimum required: %d", len(pcmData), MinAudioDataSize)
|
||||
}
|
||||
|
||||
// Generate WAV file with proper headers and return the binary data
|
||||
wavData, err := o.generateWAVFile(pcmData)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("failed to generate WAV file: %w", err)
|
||||
}
|
||||
|
||||
// Validate generated WAV data
|
||||
if len(wavData) < WAVHeaderSize {
|
||||
return "", fmt.Errorf("generated WAV data is invalid: %d bytes, minimum required: %d", len(wavData), WAVHeaderSize)
|
||||
}
|
||||
|
||||
// Store the binary audio data in a special format that the CLI can detect
|
||||
// Use more efficient string concatenation
|
||||
return AudioDataPrefix + string(wavData), nil
|
||||
}
|
||||
}
|
||||
|
||||
return "", fmt.Errorf("no audio data received from TTS model")
|
||||
}
|
||||
|
||||
// generateWAVFile creates WAV data from PCM data with proper headers
|
||||
func (o *Client) generateWAVFile(pcmData []byte) ([]byte, error) {
|
||||
// Validate input size to prevent potential security issues
|
||||
if len(pcmData) == 0 {
|
||||
return nil, fmt.Errorf("empty PCM data provided")
|
||||
}
|
||||
if len(pcmData) > MaxAudioDataSize {
|
||||
return nil, fmt.Errorf("PCM data too large: %d bytes, maximum allowed: %d", len(pcmData), MaxAudioDataSize)
|
||||
}
|
||||
|
||||
// WAV file parameters (Gemini TTS default specs)
|
||||
channels := DefaultChannels
|
||||
sampleRate := DefaultSampleRate
|
||||
bitsPerSample := DefaultBitsPerSample
|
||||
|
||||
// Calculate required values
|
||||
byteRate := sampleRate * channels * bitsPerSample / 8
|
||||
blockAlign := channels * bitsPerSample / 8
|
||||
dataLen := uint32(len(pcmData))
|
||||
riffSize := RIFFHeaderSize + dataLen
|
||||
|
||||
// Pre-allocate buffer with known size for better performance
|
||||
totalSize := int(riffSize + 8) // +8 for RIFF header
|
||||
buf := bytes.NewBuffer(make([]byte, 0, totalSize))
|
||||
|
||||
// RIFF header
|
||||
buf.WriteString("RIFF")
|
||||
binary.Write(buf, binary.LittleEndian, riffSize)
|
||||
buf.WriteString("WAVE")
|
||||
|
||||
// fmt chunk
|
||||
buf.WriteString("fmt ")
|
||||
binary.Write(buf, binary.LittleEndian, uint32(16)) // subchunk1Size
|
||||
binary.Write(buf, binary.LittleEndian, uint16(1)) // audioFormat = PCM
|
||||
binary.Write(buf, binary.LittleEndian, uint16(channels)) // numChannels
|
||||
binary.Write(buf, binary.LittleEndian, uint32(sampleRate)) // sampleRate
|
||||
binary.Write(buf, binary.LittleEndian, uint32(byteRate)) // byteRate
|
||||
binary.Write(buf, binary.LittleEndian, uint16(blockAlign)) // blockAlign
|
||||
binary.Write(buf, binary.LittleEndian, uint16(bitsPerSample)) // bitsPerSample
|
||||
|
||||
// data chunk
|
||||
buf.WriteString("data")
|
||||
binary.Write(buf, binary.LittleEndian, dataLen)
|
||||
|
||||
// Write PCM data to buffer
|
||||
buf.Write(pcmData)
|
||||
|
||||
// Validate generated WAV data
|
||||
result := buf.Bytes()
|
||||
if len(result) < WAVHeaderSize {
|
||||
return nil, fmt.Errorf("generated WAV data is invalid: %d bytes, minimum required: %d", len(result), WAVHeaderSize)
|
||||
}
|
||||
|
||||
return result, nil
|
||||
}
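
The header arithmetic for the default Gemini TTS format (mono, 24 kHz, 16-bit PCM) works out as follows; a small sketch for sanity-checking the constants above:

package main

import "fmt"

func main() {
	const (
		channels      = 1
		sampleRate    = 24000
		bitsPerSample = 16
		pcmBytes      = 4 // example payload size
	)

	byteRate := sampleRate * channels * bitsPerSample / 8 // 48000 bytes per second
	blockAlign := channels * bitsPerSample / 8            // 2 bytes per sample frame
	riffSize := 36 + pcmBytes                             // value written in the RIFF size field
	totalSize := riffSize + 8                             // full file size: 44-byte header + PCM data

	fmt.Println(byteRate, blockAlign, riffSize, totalSize) // 48000 2 40 48
}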
|
||||
|
||||
// convertMessages converts fabric chat messages to genai Content format
|
||||
func (o *Client) convertMessages(msgs []*chat.ChatCompletionMessage) []*genai.Content {
|
||||
var contents []*genai.Content
|
||||
|
||||
for _, msg := range msgs {
|
||||
content := &genai.Content{Parts: []*genai.Part{}}
|
||||
|
||||
switch msg.Role {
|
||||
case chat.ChatMessageRoleAssistant:
|
||||
content.Role = "model"
|
||||
case chat.ChatMessageRoleUser:
|
||||
content.Role = "user"
|
||||
case chat.ChatMessageRoleSystem, chat.ChatMessageRoleDeveloper, chat.ChatMessageRoleFunction, chat.ChatMessageRoleTool:
|
||||
// Gemini's API only accepts "user" and "model" roles.
|
||||
// Map all other roles to "user" to preserve instruction context.
|
||||
content.Role = "user"
|
||||
default:
|
||||
content.Role = "user"
|
||||
}
|
||||
|
||||
if msg.Content != "" {
|
||||
content.Parts = append(content.Parts, &genai.Part{Text: msg.Content})
|
||||
}
|
||||
|
||||
// Handle multi-content messages (images, etc.)
|
||||
for _, part := range msg.MultiContent {
|
||||
switch part.Type {
|
||||
case chat.ChatMessagePartTypeText:
|
||||
content.Parts = append(content.Parts, &genai.Part{Text: part.Text})
|
||||
case chat.ChatMessagePartTypeImageURL:
|
||||
// TODO: Handle image URLs if needed
|
||||
// This would require downloading and converting to inline data
|
||||
}
|
||||
}
|
||||
|
||||
contents = append(contents, content)
|
||||
}
|
||||
|
||||
return contents
|
||||
}
|
||||
|
||||
// extractTextFromResponse extracts text content from the response and appends
|
||||
// any web citations in a standardized format.
|
||||
func (o *Client) extractTextFromResponse(response *genai.GenerateContentResponse) string {
|
||||
if response == nil {
|
||||
return ""
|
||||
}
|
||||
|
||||
text := o.extractTextParts(response)
|
||||
citations := o.extractCitations(response)
|
||||
if len(citations) > 0 {
|
||||
return text + citationHeader + strings.Join(citations, citationSeparator)
|
||||
}
|
||||
return text
|
||||
}
|
||||
|
||||
func (o *Client) extractTextParts(response *genai.GenerateContentResponse) string {
|
||||
var builder strings.Builder
|
||||
for _, candidate := range response.Candidates {
|
||||
if candidate == nil || candidate.Content == nil {
|
||||
continue
|
||||
}
|
||||
for _, part := range candidate.Content.Parts {
|
||||
if part != nil && part.Text != "" {
|
||||
builder.WriteString(part.Text)
|
||||
}
|
||||
}
|
||||
}
|
||||
return builder.String()
|
||||
}
|
||||
|
||||
func (o *Client) extractCitations(response *genai.GenerateContentResponse) []string {
|
||||
if response == nil || len(response.Candidates) == 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
citationMap := make(map[string]bool)
|
||||
var citations []string
|
||||
for _, candidate := range response.Candidates {
|
||||
if candidate == nil || candidate.GroundingMetadata == nil {
|
||||
continue
|
||||
}
|
||||
chunks := candidate.GroundingMetadata.GroundingChunks
|
||||
if len(chunks) == 0 {
|
||||
continue
|
||||
}
|
||||
for _, chunk := range chunks {
|
||||
if chunk == nil || chunk.Web == nil {
|
||||
continue
|
||||
}
|
||||
uri := chunk.Web.URI
|
||||
title := chunk.Web.Title
|
||||
if uri == "" || title == "" {
|
||||
continue
|
||||
}
|
||||
var keyBuilder strings.Builder
|
||||
keyBuilder.WriteString(uri)
|
||||
keyBuilder.WriteByte('|')
|
||||
keyBuilder.WriteString(title)
|
||||
key := keyBuilder.String()
|
||||
if !citationMap[key] {
|
||||
citationMap[key] = true
|
||||
citationText := fmt.Sprintf(citationFormat, title, uri)
|
||||
citations = append(citations, citationText)
|
||||
}
|
||||
}
|
||||
}
|
||||
return citations
|
||||
}
|
||||
|
||||
@@ -1,34 +1,46 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"strings"
|
||||
"testing"
|
||||
|
||||
"github.com/google/generative-ai-go/genai"
|
||||
"google.golang.org/genai"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
)
|
||||
|
||||
// Test generated using Keploy
|
||||
func TestBuildModelNameSimple(t *testing.T) {
|
||||
// Test buildModelNameFull method
|
||||
func TestBuildModelNameFull(t *testing.T) {
|
||||
client := &Client{}
|
||||
fullModelName := "models/chat-bison-001"
|
||||
expected := "chat-bison-001"
|
||||
|
||||
result := client.buildModelNameSimple(fullModelName)
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
{"chat-bison-001", "models/chat-bison-001"},
|
||||
{"models/chat-bison-001", "models/chat-bison-001"},
|
||||
{"gemini-2.5-flash-preview-tts", "models/gemini-2.5-flash-preview-tts"},
|
||||
}
|
||||
|
||||
if result != expected {
|
||||
t.Errorf("Expected %v, got %v", expected, result)
|
||||
for _, test := range tests {
|
||||
result := client.buildModelNameFull(test.input)
|
||||
if result != test.expected {
|
||||
t.Errorf("For input %v, expected %v, got %v", test.input, test.expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test generated using Keploy
|
||||
func TestExtractText(t *testing.T) {
|
||||
// Test extractTextFromResponse method
|
||||
func TestExtractTextFromResponse(t *testing.T) {
|
||||
client := &Client{}
|
||||
response := &genai.GenerateContentResponse{
|
||||
Candidates: []*genai.Candidate{
|
||||
{
|
||||
Content: &genai.Content{
|
||||
Parts: []genai.Part{
|
||||
genai.Text("Hello, "),
|
||||
genai.Text("world!"),
|
||||
Parts: []*genai.Part{
|
||||
{Text: "Hello, "},
|
||||
{Text: "world!"},
|
||||
},
|
||||
},
|
||||
},
|
||||
@@ -36,9 +48,212 @@ func TestExtractText(t *testing.T) {
|
||||
}
|
||||
expected := "Hello, world!"
|
||||
|
||||
result := client.extractText(response)
|
||||
result := client.extractTextFromResponse(response)
|
||||
|
||||
if result != expected {
|
||||
t.Errorf("Expected %v, got %v", expected, result)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractTextFromResponse_Nil(t *testing.T) {
|
||||
client := &Client{}
|
||||
if got := client.extractTextFromResponse(nil); got != "" {
|
||||
t.Fatalf("expected empty string, got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestExtractTextFromResponse_EmptyGroundingChunks(t *testing.T) {
|
||||
client := &Client{}
|
||||
response := &genai.GenerateContentResponse{
|
||||
Candidates: []*genai.Candidate{
|
||||
{
|
||||
Content: &genai.Content{Parts: []*genai.Part{{Text: "Hello"}}},
|
||||
GroundingMetadata: &genai.GroundingMetadata{GroundingChunks: nil},
|
||||
},
|
||||
},
|
||||
}
|
||||
if got := client.extractTextFromResponse(response); got != "Hello" {
|
||||
t.Fatalf("expected 'Hello', got %q", got)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_WithSearch(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Search: true}
|
||||
|
||||
cfg, err := client.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.Tools == nil || len(cfg.Tools) != 1 || cfg.Tools[0].GoogleSearch == nil {
|
||||
t.Errorf("expected google search tool to be included")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_WithSearchAndLocation(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Search: true, SearchLocation: "America/Los_Angeles"}
|
||||
|
||||
cfg, err := client.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.ToolConfig == nil || cfg.ToolConfig.RetrievalConfig == nil {
|
||||
t.Fatalf("expected retrieval config when search location provided")
|
||||
}
|
||||
if cfg.ToolConfig.RetrievalConfig.LanguageCode != opts.SearchLocation {
|
||||
t.Errorf("expected language code %s, got %s", opts.SearchLocation, cfg.ToolConfig.RetrievalConfig.LanguageCode)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_InvalidLocation(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Search: true, SearchLocation: "invalid"}
|
||||
|
||||
_, err := client.buildGenerateContentConfig(opts)
|
||||
if err == nil {
|
||||
t.Fatalf("expected error for invalid location")
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_LanguageCodeNormalization(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Search: true, SearchLocation: "en_US"}
|
||||
|
||||
cfg, err := client.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.ToolConfig == nil || cfg.ToolConfig.RetrievalConfig.LanguageCode != "en-US" {
|
||||
t.Fatalf("expected normalized language code 'en-US', got %+v", cfg.ToolConfig)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_Thinking(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Thinking: domain.ThinkingLow}
|
||||
|
||||
cfg, err := client.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.ThinkingConfig == nil || !cfg.ThinkingConfig.IncludeThoughts {
|
||||
t.Fatalf("expected thinking config with thoughts included")
|
||||
}
|
||||
if cfg.ThinkingConfig.ThinkingBudget == nil || *cfg.ThinkingConfig.ThinkingBudget != int32(domain.TokenBudgetLow) {
|
||||
t.Errorf("expected thinking budget %d, got %+v", domain.TokenBudgetLow, cfg.ThinkingConfig.ThinkingBudget)
|
||||
}
|
||||
}
|
||||
|
||||
func TestBuildGenerateContentConfig_ThinkingTokens(t *testing.T) {
|
||||
client := &Client{}
|
||||
opts := &domain.ChatOptions{Thinking: domain.ThinkingLevel("123")}
|
||||
|
||||
cfg, err := client.buildGenerateContentConfig(opts)
|
||||
if err != nil {
|
||||
t.Fatalf("unexpected error: %v", err)
|
||||
}
|
||||
if cfg.ThinkingConfig == nil || cfg.ThinkingConfig.ThinkingBudget == nil {
|
||||
t.Fatalf("expected thinking config with budget")
|
||||
}
|
||||
if *cfg.ThinkingConfig.ThinkingBudget != 123 {
|
||||
t.Errorf("expected thinking budget 123, got %d", *cfg.ThinkingConfig.ThinkingBudget)
|
||||
}
|
||||
}
|
||||
|
||||
func TestCitationFormatting(t *testing.T) {
|
||||
client := &Client{}
|
||||
response := &genai.GenerateContentResponse{
|
||||
Candidates: []*genai.Candidate{
|
||||
{
|
||||
Content: &genai.Content{Parts: []*genai.Part{{Text: "Based on recent research, AI is advancing rapidly."}}},
|
||||
GroundingMetadata: &genai.GroundingMetadata{
|
||||
GroundingChunks: []*genai.GroundingChunk{
|
||||
{Web: &genai.GroundingChunkWeb{URI: "https://example.com/ai", Title: "AI Research"}},
|
||||
{Web: &genai.GroundingChunkWeb{URI: "https://news.com/tech", Title: "Tech News"}},
|
||||
{Web: &genai.GroundingChunkWeb{URI: "https://example.com/ai", Title: "AI Research"}}, // duplicate
|
||||
},
|
||||
},
|
||||
},
|
||||
},
|
||||
}
|
||||
|
||||
result := client.extractTextFromResponse(response)
|
||||
if !strings.Contains(result, "## Sources") {
|
||||
t.Fatalf("expected sources section in result: %s", result)
|
||||
}
|
||||
if strings.Count(result, "- [") != 2 {
|
||||
t.Errorf("expected 2 unique citations, got %d", strings.Count(result, "- ["))
|
||||
}
|
||||
}
|
||||
|
||||
// Test convertMessages handles role mapping correctly
|
||||
func TestConvertMessagesRoles(t *testing.T) {
|
||||
client := &Client{}
|
||||
msgs := []*chat.ChatCompletionMessage{
|
||||
{Role: chat.ChatMessageRoleUser, Content: "user"},
|
||||
{Role: chat.ChatMessageRoleAssistant, Content: "assistant"},
|
||||
{Role: chat.ChatMessageRoleSystem, Content: "system"},
|
||||
}
|
||||
|
||||
contents := client.convertMessages(msgs)
|
||||
|
||||
expected := []string{"user", "model", "user"}
|
||||
|
||||
if len(contents) != len(expected) {
|
||||
t.Fatalf("expected %d contents, got %d", len(expected), len(contents))
|
||||
}
|
||||
|
||||
for i, c := range contents {
|
||||
if c.Role != expected[i] {
|
||||
t.Errorf("content %d expected role %s, got %s", i, expected[i], c.Role)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test isTTSModel method
|
||||
func TestIsTTSModel(t *testing.T) {
|
||||
client := &Client{}
|
||||
|
||||
tests := []struct {
|
||||
modelName string
|
||||
expected bool
|
||||
}{
|
||||
{"gemini-2.5-flash-preview-tts", true},
|
||||
{"text-to-speech-model", true},
|
||||
{"TTS-MODEL", true},
|
||||
{"gemini-pro", false},
|
||||
{"chat-bison", false},
|
||||
{"", false},
|
||||
}
|
||||
|
||||
for _, test := range tests {
|
||||
result := client.isTTSModel(test.modelName)
|
||||
if result != test.expected {
|
||||
t.Errorf("For model %v, expected %v, got %v", test.modelName, test.expected, result)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Test generateWAVFile method (basic test)
|
||||
func TestGenerateWAVFile(t *testing.T) {
|
||||
client := &Client{}
|
||||
|
||||
// Test with minimal PCM data
|
||||
pcmData := []byte{0x00, 0x01, 0x02, 0x03}
|
||||
|
||||
result, err := client.generateWAVFile(pcmData)
|
||||
if err != nil {
|
||||
t.Errorf("generateWAVFile failed: %v", err)
|
||||
}
|
||||
|
||||
// Check that we got some data back
|
||||
if len(result) == 0 {
|
||||
t.Error("generateWAVFile returned empty data")
|
||||
}
|
||||
|
||||
// Check that it starts with RIFF header
|
||||
if len(result) >= 4 && string(result[0:4]) != "RIFF" {
|
||||
t.Error("Generated WAV data doesn't start with RIFF header")
|
||||
}
|
||||
}
|
||||
|
||||
218  internal/plugins/ai/gemini/voices.go  (new file)
@@ -0,0 +1,218 @@
|
||||
package gemini
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
)
|
||||
|
||||
// GeminiVoice represents a Gemini TTS voice with its characteristics
|
||||
type GeminiVoice struct {
|
||||
Name string
|
||||
Description string
|
||||
Characteristics []string
|
||||
}
|
||||
|
||||
// GetGeminiVoices returns the current list of supported Gemini TTS voices
|
||||
// This list is maintained based on official Google Gemini documentation
|
||||
// https://ai.google.dev/gemini-api/docs/speech-generation
|
||||
func GetGeminiVoices() []GeminiVoice {
|
||||
return []GeminiVoice{
|
||||
// Firm voices
|
||||
{Name: "Kore", Description: "Firm and confident", Characteristics: []string{"firm", "confident", "default"}},
|
||||
{Name: "Orus", Description: "Firm and decisive", Characteristics: []string{"firm", "decisive"}},
|
||||
{Name: "Alnilam", Description: "Firm and strong", Characteristics: []string{"firm", "strong"}},
|
||||
|
||||
// Upbeat voices
|
||||
{Name: "Puck", Description: "Upbeat and energetic", Characteristics: []string{"upbeat", "energetic"}},
|
||||
{Name: "Laomedeia", Description: "Upbeat and lively", Characteristics: []string{"upbeat", "lively"}},
|
||||
|
||||
// Bright voices
|
||||
{Name: "Zephyr", Description: "Bright and cheerful", Characteristics: []string{"bright", "cheerful"}},
|
||||
{Name: "Autonoe", Description: "Bright and optimistic", Characteristics: []string{"bright", "optimistic"}},
|
||||
|
||||
// Informative voices
|
||||
{Name: "Charon", Description: "Informative and clear", Characteristics: []string{"informative", "clear"}},
|
||||
{Name: "Rasalgethi", Description: "Informative and professional", Characteristics: []string{"informative", "professional"}},
|
||||
|
||||
// Natural voices
|
||||
{Name: "Aoede", Description: "Breezy and natural", Characteristics: []string{"breezy", "natural"}},
|
||||
{Name: "Leda", Description: "Youthful and energetic", Characteristics: []string{"youthful", "energetic"}},
|
||||
|
||||
// Gentle voices
|
||||
{Name: "Vindemiatrix", Description: "Gentle and kind", Characteristics: []string{"gentle", "kind"}},
|
||||
{Name: "Achernar", Description: "Soft and gentle", Characteristics: []string{"soft", "gentle"}},
|
||||
{Name: "Enceladus", Description: "Breathy and soft", Characteristics: []string{"breathy", "soft"}},
|
||||
|
||||
// Warm voices
|
||||
{Name: "Sulafat", Description: "Warm and welcoming", Characteristics: []string{"warm", "welcoming"}},
|
||||
{Name: "Capella", Description: "Warm and approachable", Characteristics: []string{"warm", "approachable"}},
|
||||
|
||||
// Clear voices
|
||||
{Name: "Iapetus", Description: "Clear and articulate", Characteristics: []string{"clear", "articulate"}},
|
||||
{Name: "Erinome", Description: "Clear and precise", Characteristics: []string{"clear", "precise"}},
|
||||
|
||||
// Pleasant voices
|
||||
{Name: "Algieba", Description: "Smooth and pleasant", Characteristics: []string{"smooth", "pleasant"}},
|
||||
{Name: "Vega", Description: "Smooth and flowing", Characteristics: []string{"smooth", "flowing"}},
|
||||
|
||||
// Textured voices
|
||||
{Name: "Algenib", Description: "Gravelly texture", Characteristics: []string{"gravelly", "textured"}},
|
||||
|
||||
// Relaxed voices
|
||||
{Name: "Callirrhoe", Description: "Easy-going and relaxed", Characteristics: []string{"relaxed", "easy-going"}},
|
||||
{Name: "Despina", Description: "Calm and serene", Characteristics: []string{"calm", "serene"}},
|
||||
|
||||
// Mature voices
|
||||
{Name: "Gacrux", Description: "Mature and experienced", Characteristics: []string{"mature", "experienced"}},
|
||||
|
||||
// Expressive voices
|
||||
{Name: "Pulcherrima", Description: "Forward and expressive", Characteristics: []string{"forward", "expressive"}},
|
||||
{Name: "Lyra", Description: "Melodic and expressive", Characteristics: []string{"melodic", "expressive"}},
|
||||
|
||||
// Dynamic voices
|
||||
{Name: "Fenrir", Description: "Excitable and dynamic", Characteristics: []string{"excitable", "dynamic"}},
|
||||
{Name: "Sadachbia", Description: "Lively and animated", Characteristics: []string{"lively", "animated"}},
|
||||
|
||||
// Friendly voices
|
||||
{Name: "Achird", Description: "Friendly and approachable", Characteristics: []string{"friendly", "approachable"}},
|
||||
|
||||
// Casual voices
|
||||
{Name: "Zubenelgenubi", Description: "Casual and conversational", Characteristics: []string{"casual", "conversational"}},
|
||||
|
||||
// Additional voices from latest API
|
||||
{Name: "Sadaltager", Description: "Experimental voice with a calm and neutral tone", Characteristics: []string{"experimental", "calm", "neutral"}},
|
||||
{Name: "Schedar", Description: "Experimental voice with a warm and engaging tone", Characteristics: []string{"experimental", "warm", "engaging"}},
|
||||
{Name: "Umbriel", Description: "Experimental voice with a deep and resonant tone", Characteristics: []string{"experimental", "deep", "resonant"}},
|
||||
}
|
||||
}
|
||||
|
||||
// GetGeminiVoiceNames returns just the voice names in alphabetical order
|
||||
func GetGeminiVoiceNames() []string {
|
||||
voices := GetGeminiVoices()
|
||||
names := make([]string, len(voices))
|
||||
for i, voice := range voices {
|
||||
names[i] = voice.Name
|
||||
}
|
||||
sort.Strings(names)
|
||||
return names
|
||||
}
|
||||
|
||||
// IsValidGeminiVoice checks if a voice name is valid
|
||||
func IsValidGeminiVoice(voiceName string) bool {
|
||||
if voiceName == "" {
|
||||
return true // Empty voice is valid (will use default)
|
||||
}
|
||||
|
||||
for _, voice := range GetGeminiVoices() {
|
||||
if voice.Name == voiceName {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
// GetGeminiVoiceByName returns a specific voice by name
|
||||
func GetGeminiVoiceByName(name string) (*GeminiVoice, error) {
|
||||
for _, voice := range GetGeminiVoices() {
|
||||
if voice.Name == name {
|
||||
return &voice, nil
|
||||
}
|
||||
}
|
||||
return nil, fmt.Errorf("voice '%s' not found", name)
|
||||
}
|
||||
|
||||
// ListGeminiVoices formats the voice list for display
|
||||
func ListGeminiVoices(shellCompleteMode bool) string {
|
||||
if shellCompleteMode {
|
||||
// For shell completion, just return voice names
|
||||
names := GetGeminiVoiceNames()
|
||||
result := ""
|
||||
for _, name := range names {
|
||||
result += name + "\n"
|
||||
}
|
||||
return result
|
||||
}
|
||||
|
||||
// For human-readable output
|
||||
voices := GetGeminiVoices()
|
||||
result := "Available Gemini Text-to-Speech voices:\n\n"
|
||||
|
||||
// Group by characteristics for better readability
|
||||
groups := map[string][]GeminiVoice{
|
||||
"Firm & Confident": {},
|
||||
"Bright & Cheerful": {},
|
||||
"Warm & Welcoming": {},
|
||||
"Clear & Professional": {},
|
||||
"Natural & Expressive": {},
|
||||
"Other Voices": {},
|
||||
}
|
||||
|
||||
for _, voice := range voices {
|
||||
placed := false
|
||||
for _, char := range voice.Characteristics {
|
||||
switch char {
|
||||
case "firm", "confident", "decisive", "strong":
|
||||
if !placed {
|
||||
groups["Firm & Confident"] = append(groups["Firm & Confident"], voice)
|
||||
placed = true
|
||||
}
|
||||
case "bright", "cheerful", "upbeat", "energetic", "lively":
|
||||
if !placed {
|
||||
groups["Bright & Cheerful"] = append(groups["Bright & Cheerful"], voice)
|
||||
placed = true
|
||||
}
|
||||
case "warm", "welcoming", "friendly", "approachable":
|
||||
if !placed {
|
||||
groups["Warm & Welcoming"] = append(groups["Warm & Welcoming"], voice)
|
||||
placed = true
|
||||
}
|
||||
case "clear", "informative", "professional", "articulate":
|
||||
if !placed {
|
||||
groups["Clear & Professional"] = append(groups["Clear & Professional"], voice)
|
||||
placed = true
|
||||
}
|
||||
case "natural", "expressive", "melodic", "breezy":
|
||||
if !placed {
|
||||
groups["Natural & Expressive"] = append(groups["Natural & Expressive"], voice)
|
||||
placed = true
|
||||
}
|
||||
}
|
||||
}
|
||||
if !placed {
|
||||
groups["Other Voices"] = append(groups["Other Voices"], voice)
|
||||
}
|
||||
}
|
||||
|
||||
// Output grouped voices
|
||||
for groupName, groupVoices := range groups {
|
||||
if len(groupVoices) > 0 {
|
||||
result += fmt.Sprintf("%s:\n", groupName)
|
||||
for _, voice := range groupVoices {
|
||||
defaultStr := ""
|
||||
if voice.Name == "Kore" {
|
||||
defaultStr = " (default)"
|
||||
}
|
||||
result += fmt.Sprintf(" %-15s - %s%s\n", voice.Name, voice.Description, defaultStr)
|
||||
}
|
||||
result += "\n"
|
||||
}
|
||||
}
|
||||
|
||||
result += "Use --voice <voice_name> to select a specific voice.\n"
|
||||
result += "Example: fabric --voice Charon -m gemini-2.5-flash-preview-tts -o output.wav \"Hello world\"\n"
|
||||
|
||||
return result
|
||||
}
|
||||
|
||||
// NOTE: This implementation maintains a curated list based on official Google documentation.
|
||||
// In the future, if Google provides a dynamic voice discovery API, this can be updated
|
||||
// to make API calls for real-time voice discovery.
|
||||
//
|
||||
// The current approach ensures:
|
||||
// 1. Fast response times (no API calls needed)
|
||||
// 2. Reliable voice information with descriptions
|
||||
// 3. Easy maintenance when new voices are added
|
||||
// 4. Offline functionality
|
||||
//
|
||||
// To update voices: Monitor Google's Gemini TTS documentation at:
|
||||
// https://ai.google.dev/gemini-api/docs/speech-generation
|
||||
@@ -1,6 +1,10 @@
|
||||
package ai
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
)
|
||||
|
||||
@@ -11,3 +15,35 @@ func NewVendorsModels() *VendorsModels {
|
||||
type VendorsModels struct {
|
||||
*util.GroupsItemsSelectorString
|
||||
}
|
||||
|
||||
// PrintWithVendor prints models including their vendor on each line.
|
||||
// When shellCompleteList is true, output is suitable for shell completion.
|
||||
func (o *VendorsModels) PrintWithVendor(shellCompleteList bool) {
|
||||
if !shellCompleteList {
|
||||
fmt.Printf("\n%v:\n", o.SelectionLabel)
|
||||
}
|
||||
|
||||
var currentItemIndex int
|
||||
|
||||
sortedGroups := make([]*util.GroupItems[string], len(o.GroupsItems))
|
||||
copy(sortedGroups, o.GroupsItems)
|
||||
sort.SliceStable(sortedGroups, func(i, j int) bool {
|
||||
return strings.ToLower(sortedGroups[i].Group) < strings.ToLower(sortedGroups[j].Group)
|
||||
})
|
||||
|
||||
for _, groupItems := range sortedGroups {
|
||||
items := make([]string, len(groupItems.Items))
|
||||
copy(items, groupItems.Items)
|
||||
sort.SliceStable(items, func(i, j int) bool {
|
||||
return strings.ToLower(items[i]) < strings.ToLower(items[j])
|
||||
})
|
||||
for _, item := range items {
|
||||
currentItemIndex++
|
||||
if shellCompleteList {
|
||||
fmt.Printf("%s|%s\n", groupItems.Group, item)
|
||||
} else {
|
||||
fmt.Printf("\t[%d]\t%s|%s\n", currentItemIndex, groupItems.Group, item)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
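
The two output shapes reduce to the format strings used above; a standalone sketch with made-up vendor and model names:

package main

import "fmt"

func main() {
	vendor, model := "OpenAI", "gpt-4o" // hypothetical entries
	// Shell-completion mode: bare vendor|model pairs, one per line.
	fmt.Printf("%s|%s\n", vendor, model)
	// Interactive mode: a tab-indented, numbered listing.
	fmt.Printf("\t[%d]\t%s|%s\n", 1, vendor, model)
}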
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
"net/http"
|
||||
"net/url"
|
||||
"os"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
@@ -61,6 +62,11 @@ func (t *transport_sec) RoundTrip(req *http.Request) (*http.Response, error) {
|
||||
return t.underlyingTransport.RoundTrip(req)
|
||||
}
|
||||
|
||||
// IsConfigured returns true only if OLLAMA_API_URL environment variable is explicitly set
|
||||
func (o *Client) IsConfigured() bool {
|
||||
return os.Getenv("OLLAMA_API_URL") != ""
|
||||
}
|
||||
|
||||
func (o *Client) configure() (err error) {
|
||||
if o.apiUrl, err = url.Parse(o.ApiUrl.Value); err != nil {
|
||||
fmt.Printf("cannot parse URL: %s: %v\n", o.ApiUrl.Value, err)
|
||||
@@ -160,6 +166,7 @@ func (o *Client) NeedsRawMode(modelName string) bool {
|
||||
ollamaPrefixes := []string{
|
||||
"llama3",
|
||||
"llama2",
|
||||
"mistral",
|
||||
}
|
||||
for _, prefix := range ollamaPrefixes {
|
||||
if strings.HasPrefix(modelName, prefix) {
|
||||
|
||||
@@ -85,6 +85,9 @@ func (o *Client) buildChatCompletionParams(
|
||||
ret.Seed = openai.Int(int64(opts.Seed))
|
||||
}
|
||||
}
|
||||
if eff, ok := parseReasoningEffort(opts.Thinking); ok {
|
||||
ret.ReasoningEffort = eff
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
|
||||
@@ -115,7 +115,11 @@ func (o *Client) sendStreamResponses(
|
||||
case string(constant.ResponseOutputTextDelta("").Default()):
|
||||
channel <- event.AsResponseOutputTextDelta().Delta
|
||||
case string(constant.ResponseOutputTextDone("").Default()):
|
||||
channel <- event.AsResponseOutputTextDone().Text
|
||||
// The Responses API sends the full text again in the
|
||||
// final "done" event. Since we've already streamed all
|
||||
// delta chunks above, sending it would duplicate the
|
||||
// output. Ignore it here to prevent doubled results.
|
||||
continue
|
||||
}
|
||||
}
|
||||
if stream.Err() == nil {
|
||||
@@ -164,6 +168,7 @@ func (o *Client) NeedsRawMode(modelName string) bool {
|
||||
"o1",
|
||||
"o3",
|
||||
"o4",
|
||||
"gpt-5",
|
||||
}
|
||||
openAIModelsNeedingRaw := []string{
|
||||
"gpt-4o-mini-search-preview",
|
||||
@@ -179,6 +184,19 @@ func (o *Client) NeedsRawMode(modelName string) bool {
|
||||
return slices.Contains(openAIModelsNeedingRaw, modelName)
|
||||
}
|
||||
|
||||
func parseReasoningEffort(level domain.ThinkingLevel) (shared.ReasoningEffort, bool) {
|
||||
switch domain.ThinkingLevel(strings.ToLower(string(level))) {
|
||||
case domain.ThinkingLow:
|
||||
return shared.ReasoningEffortLow, true
|
||||
case domain.ThinkingMedium:
|
||||
return shared.ReasoningEffortMedium, true
|
||||
case domain.ThinkingHigh:
|
||||
return shared.ReasoningEffortHigh, true
|
||||
default:
|
||||
return "", false
|
||||
}
|
||||
}
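
Only the named levels translate to an OpenAI reasoning effort; numeric budgets accepted by other vendors are reported as unsupported here. A standalone mirror of that behavior:

package main

import (
	"fmt"
	"strings"
)

// effortFor mirrors parseReasoningEffort: low/medium/high map to the
// corresponding effort string, anything else is rejected.
func effortFor(level string) (string, bool) {
	switch strings.ToLower(level) {
	case "low", "medium", "high":
		return strings.ToLower(level), true
	default:
		return "", false
	}
}

func main() {
	fmt.Println(effortFor("High")) // high true
	fmt.Println(effortFor("2048")) // false: numeric budgets are not translated to an effort
}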
|
||||
|
||||
func (o *Client) buildResponseParams(
|
||||
inputMsgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions,
|
||||
) (ret responses.ResponseNewParams) {
|
||||
@@ -224,6 +242,10 @@ func (o *Client) buildResponseParams(
|
||||
ret.Tools = tools
|
||||
}
|
||||
|
||||
if eff, ok := parseReasoningEffort(opts.Thinking); ok {
|
||||
ret.Reasoning = shared.ReasoningParam{Effort: eff}
|
||||
}
|
||||
|
||||
if !opts.Raw {
|
||||
ret.Temperature = openai.Float(opts.Temperature)
|
||||
if opts.TopP != 0 {
|
||||
|
||||
@@ -105,7 +105,7 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
|
||||
}
|
||||
}
|
||||
|
||||
chatter, err := h.registry.GetChatter(p.Model, 2048, "", false, false)
|
||||
chatter, err := h.registry.GetChatter(p.Model, 2048, p.Vendor, "", false, false)
|
||||
if err != nil {
|
||||
log.Printf("Error creating chatter: %v", err)
|
||||
streamChan <- fmt.Sprintf("Error: %v", err)
|
||||
@@ -130,6 +130,7 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
|
||||
TopP: request.TopP,
|
||||
FrequencyPenalty: request.FrequencyPenalty,
|
||||
PresencePenalty: request.PresencePenalty,
|
||||
Thinking: request.Thinking,
|
||||
}
|
||||
|
||||
session, err := chatter.Send(chatReq, opts)
|
||||
|
||||
128
internal/tools/notifications/notifications.go
Normal file
@@ -0,0 +1,128 @@
|
||||
package notifications
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"os"
|
||||
"os/exec"
|
||||
"runtime"
|
||||
)
|
||||
|
||||
// NotificationProvider interface for different notification backends
|
||||
type NotificationProvider interface {
|
||||
Send(title, message string) error
|
||||
IsAvailable() bool
|
||||
}
|
||||
|
||||
// NotificationManager handles cross-platform notifications
|
||||
type NotificationManager struct {
|
||||
provider NotificationProvider
|
||||
}
|
||||
|
||||
// NewNotificationManager creates a new notification manager with the best available provider
|
||||
func NewNotificationManager() *NotificationManager {
|
||||
var provider NotificationProvider
|
||||
|
||||
switch runtime.GOOS {
|
||||
case "darwin":
|
||||
// Try terminal-notifier first, then fall back to osascript
|
||||
provider = &TerminalNotifierProvider{}
|
||||
if !provider.IsAvailable() {
|
||||
provider = &OSAScriptProvider{}
|
||||
}
|
||||
case "linux":
|
||||
provider = &NotifySendProvider{}
|
||||
case "windows":
|
||||
provider = &PowerShellProvider{}
|
||||
default:
|
||||
provider = &NoopProvider{}
|
||||
}
|
||||
|
||||
return &NotificationManager{provider: provider}
|
||||
}
|
||||
|
||||
// Send sends a notification using the configured provider
|
||||
func (nm *NotificationManager) Send(title, message string) error {
|
||||
if nm.provider == nil {
|
||||
return fmt.Errorf("no notification provider available")
|
||||
}
|
||||
return nm.provider.Send(title, message)
|
||||
}
|
||||
|
||||
// IsAvailable checks if notifications are available
|
||||
func (nm *NotificationManager) IsAvailable() bool {
|
||||
return nm.provider != nil && nm.provider.IsAvailable()
|
||||
}
|
||||
|
||||
// macOS terminal-notifier implementation
|
||||
type TerminalNotifierProvider struct{}
|
||||
|
||||
func (t *TerminalNotifierProvider) Send(title, message string) error {
|
||||
cmd := exec.Command("terminal-notifier", "-title", title, "-message", message, "-sound", "Glass")
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (t *TerminalNotifierProvider) IsAvailable() bool {
|
||||
_, err := exec.LookPath("terminal-notifier")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// macOS osascript implementation
|
||||
type OSAScriptProvider struct{}
|
||||
|
||||
func (o *OSAScriptProvider) Send(title, message string) error {
|
||||
// SECURITY: Pass title/message via environment variables instead of interpolating them into the script to prevent AppleScript injection
|
||||
script := `display notification (system attribute "FABRIC_MESSAGE") with title (system attribute "FABRIC_TITLE") sound name "Glass"`
|
||||
cmd := exec.Command("osascript", "-e", script)
|
||||
|
||||
// Set environment variables for the AppleScript to read safely
|
||||
cmd.Env = append(os.Environ(), "FABRIC_TITLE="+title, "FABRIC_MESSAGE="+message)
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (o *OSAScriptProvider) IsAvailable() bool {
|
||||
_, err := exec.LookPath("osascript")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Linux notify-send implementation
|
||||
type NotifySendProvider struct{}
|
||||
|
||||
func (n *NotifySendProvider) Send(title, message string) error {
|
||||
cmd := exec.Command("notify-send", title, message)
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (n *NotifySendProvider) IsAvailable() bool {
|
||||
_, err := exec.LookPath("notify-send")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// Windows PowerShell implementation
|
||||
type PowerShellProvider struct{}
|
||||
|
||||
func (p *PowerShellProvider) Send(title, message string) error {
|
||||
// SECURITY: Use environment variables to avoid PowerShell injection attacks
|
||||
script := `Add-Type -AssemblyName System.Windows.Forms; [System.Windows.Forms.MessageBox]::Show($env:FABRIC_MESSAGE, $env:FABRIC_TITLE)`
|
||||
cmd := exec.Command("powershell", "-Command", script)
|
||||
|
||||
// Set environment variables for PowerShell to read safely
|
||||
cmd.Env = append(os.Environ(), "FABRIC_TITLE="+title, "FABRIC_MESSAGE="+message)
|
||||
return cmd.Run()
|
||||
}
|
||||
|
||||
func (p *PowerShellProvider) IsAvailable() bool {
|
||||
_, err := exec.LookPath("powershell")
|
||||
return err == nil
|
||||
}
|
||||
|
||||
// NoopProvider for unsupported platforms
|
||||
type NoopProvider struct{}
|
||||
|
||||
func (n *NoopProvider) Send(title, message string) error {
|
||||
// Silent no-op for unsupported platforms
|
||||
return nil
|
||||
}
|
||||
|
||||
func (n *NoopProvider) IsAvailable() bool {
|
||||
return false
|
||||
}
|
||||
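For orientation, here is a minimal usage sketch of the new notifications package (a sketch under assumptions: the import path is inferred from the new file's location, and on platforms without a backend such as terminal-notifier or notify-send, `IsAvailable` simply reports false):

```go
package main

import (
	"log"

	// Import path assumed from the new file's location in this repository.
	"github.com/danielmiessler/fabric/internal/tools/notifications"
)

func main() {
	nm := notifications.NewNotificationManager()
	if !nm.IsAvailable() {
		log.Println("no desktop notification backend available; skipping")
		return
	}
	if err := nm.Send("Fabric", "Pattern run finished"); err != nil {
		log.Printf("notification failed: %v", err)
	}
}
```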
168
internal/tools/notifications/notifications_test.go
Normal file
@@ -0,0 +1,168 @@
|
||||
package notifications
|
||||
|
||||
import (
|
||||
"os/exec"
|
||||
"runtime"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestNewNotificationManager(t *testing.T) {
|
||||
manager := NewNotificationManager()
|
||||
if manager == nil {
|
||||
t.Fatal("NewNotificationManager() returned nil")
|
||||
}
|
||||
if manager.provider == nil {
|
||||
t.Fatal("NotificationManager provider is nil")
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotificationManagerIsAvailable(t *testing.T) {
|
||||
manager := NewNotificationManager()
|
||||
// Should not panic
|
||||
_ = manager.IsAvailable()
|
||||
}
|
||||
|
||||
func TestNotificationManagerSend(t *testing.T) {
|
||||
manager := NewNotificationManager()
|
||||
|
||||
// Test sending notification - this may fail on systems without notification tools
|
||||
// but should not panic
|
||||
err := manager.Send("Test Title", "Test Message")
|
||||
if err != nil {
|
||||
t.Logf("Notification send failed (expected on systems without notification tools): %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestTerminalNotifierProvider(t *testing.T) {
|
||||
if runtime.GOOS != "darwin" {
|
||||
t.Skip("Skipping macOS terminal-notifier test on non-macOS platform")
|
||||
}
|
||||
|
||||
provider := &TerminalNotifierProvider{}
|
||||
|
||||
// Test availability - depends on whether terminal-notifier is installed
|
||||
available := provider.IsAvailable()
|
||||
t.Logf("terminal-notifier available: %v", available)
|
||||
|
||||
if available {
|
||||
err := provider.Send("Test", "Test message")
|
||||
if err != nil {
|
||||
t.Logf("terminal-notifier send failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestOSAScriptProvider(t *testing.T) {
|
||||
if runtime.GOOS != "darwin" {
|
||||
t.Skip("Skipping macOS osascript test on non-macOS platform")
|
||||
}
|
||||
|
||||
provider := &OSAScriptProvider{}
|
||||
|
||||
// osascript should always be available on macOS
|
||||
if !provider.IsAvailable() {
|
||||
t.Error("osascript should be available on macOS")
|
||||
}
|
||||
|
||||
// Test sending (may show actual notification)
|
||||
err := provider.Send("Test", "Test message")
|
||||
if err != nil {
|
||||
t.Errorf("osascript send failed: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNotifySendProvider(t *testing.T) {
|
||||
if runtime.GOOS != "linux" {
|
||||
t.Skip("Skipping Linux notify-send test on non-Linux platform")
|
||||
}
|
||||
|
||||
provider := &NotifySendProvider{}
|
||||
|
||||
// Test availability - depends on whether notify-send is installed
|
||||
available := provider.IsAvailable()
|
||||
t.Logf("notify-send available: %v", available)
|
||||
|
||||
if available {
|
||||
err := provider.Send("Test", "Test message")
|
||||
if err != nil {
|
||||
t.Logf("notify-send send failed: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func TestPowerShellProvider(t *testing.T) {
|
||||
if runtime.GOOS != "windows" {
|
||||
t.Skip("Skipping Windows PowerShell test on non-Windows platform")
|
||||
}
|
||||
|
||||
provider := &PowerShellProvider{}
|
||||
|
||||
// PowerShell should be available on Windows
|
||||
if !provider.IsAvailable() {
|
||||
t.Error("PowerShell should be available on Windows")
|
||||
}
|
||||
|
||||
// Note: This will show a message box if run
|
||||
// In CI/CD, this might not work properly
|
||||
err := provider.Send("Test", "Test message")
|
||||
if err != nil {
|
||||
t.Logf("PowerShell send failed (expected in headless environments): %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestNoopProvider(t *testing.T) {
|
||||
provider := &NoopProvider{}
|
||||
|
||||
// Should always report as not available
|
||||
if provider.IsAvailable() {
|
||||
t.Error("NoopProvider should report as not available")
|
||||
}
|
||||
|
||||
// Should never error
|
||||
err := provider.Send("Test", "Test message")
|
||||
if err != nil {
|
||||
t.Errorf("NoopProvider send should never error, got: %v", err)
|
||||
}
|
||||
}
|
||||
|
||||
func TestProviderIsAvailable(t *testing.T) {
|
||||
tests := []struct {
|
||||
name string
|
||||
provider NotificationProvider
|
||||
command string
|
||||
}{
|
||||
{"TerminalNotifier", &TerminalNotifierProvider{}, "terminal-notifier"},
|
||||
{"OSAScript", &OSAScriptProvider{}, "osascript"},
|
||||
{"NotifySend", &NotifySendProvider{}, "notify-send"},
|
||||
{"PowerShell", &PowerShellProvider{}, "powershell"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
available := tt.provider.IsAvailable()
|
||||
|
||||
// Cross-check with actual command availability
|
||||
_, err := exec.LookPath(tt.command)
|
||||
expectedAvailable := err == nil
|
||||
|
||||
if available != expectedAvailable {
|
||||
t.Logf("Provider %s availability mismatch: provider=%v, command=%v",
|
||||
tt.name, available, expectedAvailable)
|
||||
// This is informational, not a failure, since system setup varies
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestSendWithSpecialCharacters(t *testing.T) {
|
||||
manager := NewNotificationManager()
|
||||
|
||||
// Test with special characters that might break shell commands
|
||||
specialTitle := `Title with "quotes" and 'apostrophes'`
|
||||
specialMessage := `Message with \backslashes and $variables and "quotes"`
|
||||
|
||||
err := manager.Send(specialTitle, specialMessage)
|
||||
if err != nil {
|
||||
t.Logf("Send with special characters failed (may be expected): %v", err)
|
||||
}
|
||||
}
|
||||
@@ -25,17 +25,33 @@ import (
|
||||
"time"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/plugins"
|
||||
"github.com/kballard/go-shellquote"
|
||||
"google.golang.org/api/option"
|
||||
"google.golang.org/api/youtube/v3"
|
||||
)
|
||||
|
||||
var timestampRegex *regexp.Regexp
|
||||
var languageFileRegex *regexp.Regexp
|
||||
var videoPatternRegex *regexp.Regexp
|
||||
var playlistPatternRegex *regexp.Regexp
|
||||
var vttTagRegex *regexp.Regexp
|
||||
var durationRegex *regexp.Regexp
|
||||
|
||||
const TimeGapForRepeats = 10 // seconds
|
||||
|
||||
func init() {
|
||||
// Match timestamps like "00:00:01.234" or just numbers or sequence numbers
|
||||
timestampRegex = regexp.MustCompile(`^\d+$|^\d{1,2}:\d{2}(:\d{2})?(\.\d{3})?$`)
|
||||
// Match language-specific VTT files like .en.vtt, .es.vtt, .en-US.vtt, .pt-BR.vtt
|
||||
languageFileRegex = regexp.MustCompile(`\.[a-z]{2}(-[A-Z]{2})?\.vtt$`)
|
||||
// YouTube video ID pattern
|
||||
videoPatternRegex = regexp.MustCompile(`(?:https?:\/\/)?(?:www\.)?(?:youtube\.com\/(?:live\/|[^\/\n\s]+\/\S+\/|(?:v|e(?:mbed)?)\/|(?:s(?:horts)\/)|\S*?[?&]v=)|youtu\.be\/)([a-zA-Z0-9_-]*)`)
|
||||
// YouTube playlist ID pattern
|
||||
playlistPatternRegex = regexp.MustCompile(`[?&]list=([a-zA-Z0-9_-]+)`)
|
||||
// VTT formatting tags like <c.colorE5E5E5>, </c>, etc.
|
||||
vttTagRegex = regexp.MustCompile(`<[^>]*>`)
|
||||
// YouTube duration format PT1H2M3S
|
||||
durationRegex = regexp.MustCompile(`(?i)PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?`)
|
||||
}
|
||||
|
||||
func NewYouTube() (ret *YouTube) {
|
||||
@@ -76,18 +92,14 @@ func (o *YouTube) initService() (err error) {
|
||||
}
|
||||
|
||||
func (o *YouTube) GetVideoOrPlaylistId(url string) (videoId string, playlistId string, err error) {
|
||||
// Video ID pattern
|
||||
videoPattern := `(?:https?:\/\/)?(?:www\.)?(?:youtube\.com\/(?:live\/|[^\/\n\s]+\/\S+\/|(?:v|e(?:mbed)?)\/|(?:s(?:horts)\/)|\S*?[?&]v=)|youtu\.be\/)([a-zA-Z0-9_-]*)`
|
||||
videoRe := regexp.MustCompile(videoPattern)
|
||||
videoMatch := videoRe.FindStringSubmatch(url)
|
||||
// Extract video ID using pre-compiled regex
|
||||
videoMatch := videoPatternRegex.FindStringSubmatch(url)
|
||||
if len(videoMatch) > 1 {
|
||||
videoId = videoMatch[1]
|
||||
}
|
||||
|
||||
// Playlist ID pattern
|
||||
playlistPattern := `[?&]list=([a-zA-Z0-9_-]+)`
|
||||
playlistRe := regexp.MustCompile(playlistPattern)
|
||||
playlistMatch := playlistRe.FindStringSubmatch(url)
|
||||
// Extract playlist ID using pre-compiled regex
|
||||
playlistMatch := playlistPatternRegex.FindStringSubmatch(url)
|
||||
if len(playlistMatch) > 1 {
|
||||
playlistId = playlistMatch[1]
|
||||
}
|
||||
@@ -113,17 +125,27 @@ func (o *YouTube) GrabTranscriptForUrl(url string, language string) (ret string,
|
||||
|
||||
func (o *YouTube) GrabTranscript(videoId string, language string) (ret string, err error) {
|
||||
// Use yt-dlp for reliable transcript extraction
|
||||
return o.tryMethodYtDlp(videoId, language)
|
||||
return o.GrabTranscriptWithArgs(videoId, language, "")
|
||||
}
|
||||
|
||||
func (o *YouTube) GrabTranscriptWithArgs(videoId string, language string, additionalArgs string) (ret string, err error) {
|
||||
// Use yt-dlp for reliable transcript extraction
|
||||
return o.tryMethodYtDlp(videoId, language, additionalArgs)
|
||||
}
|
||||
|
||||
func (o *YouTube) GrabTranscriptWithTimestamps(videoId string, language string) (ret string, err error) {
|
||||
// Use yt-dlp for reliable transcript extraction with timestamps
|
||||
return o.tryMethodYtDlpWithTimestamps(videoId, language)
|
||||
return o.GrabTranscriptWithTimestampsWithArgs(videoId, language, "")
|
||||
}
|
||||
|
||||
func (o *YouTube) GrabTranscriptWithTimestampsWithArgs(videoId string, language string, additionalArgs string) (ret string, err error) {
|
||||
// Use yt-dlp for reliable transcript extraction with timestamps
|
||||
return o.tryMethodYtDlpWithTimestamps(videoId, language, additionalArgs)
|
||||
}
|
||||
|
||||
// tryMethodYtDlpInternal is a helper function to reduce duplication between
|
||||
// tryMethodYtDlp and tryMethodYtDlpWithTimestamps.
|
||||
func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, processVTTFileFunc func(filename string) (string, error)) (ret string, err error) {
|
||||
func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, additionalArgs string, processVTTFileFunc func(filename string) (string, error)) (ret string, err error) {
|
||||
// Check if yt-dlp is available
|
||||
if _, err = exec.LookPath("yt-dlp"); err != nil {
|
||||
err = fmt.Errorf("yt-dlp not found in PATH. Please install yt-dlp to use YouTube transcript functionality")
|
||||
@@ -141,30 +163,93 @@ func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, proces
|
||||
// Use yt-dlp to get transcript
|
||||
videoURL := "https://www.youtube.com/watch?v=" + videoId
|
||||
outputPath := filepath.Join(tempDir, "%(title)s.%(ext)s")
|
||||
lang_match := language
|
||||
if len(language) > 2 {
|
||||
lang_match = language[:2]
|
||||
}
|
||||
cmd := exec.Command("yt-dlp",
|
||||
|
||||
baseArgs := []string{
|
||||
"--write-auto-subs",
|
||||
"--sub-lang", lang_match,
|
||||
"--skip-download",
|
||||
"--sub-format", "vtt",
|
||||
"--quiet",
|
||||
"--no-warnings",
|
||||
"-o", outputPath,
|
||||
videoURL)
|
||||
}
|
||||
|
||||
args := append([]string{}, baseArgs...)
|
||||
|
||||
// Add built-in language selection first
|
||||
if language != "" {
|
||||
langMatch := language
|
||||
if len(langMatch) > 2 {
|
||||
langMatch = langMatch[:2]
|
||||
}
|
||||
args = append(args, "--sub-langs", langMatch)
|
||||
}
|
||||
|
||||
// Add user-provided arguments last so they take precedence
|
||||
if additionalArgs != "" {
|
||||
additionalArgsList, err := shellquote.Split(additionalArgs)
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("invalid yt-dlp arguments: %v", err)
|
||||
}
|
||||
args = append(args, additionalArgsList...)
|
||||
}
|
||||
|
||||
args = append(args, videoURL)
|
||||
|
||||
cmd := exec.Command("yt-dlp", args...)
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err = cmd.Run(); err != nil {
|
||||
err = fmt.Errorf("yt-dlp failed: %v, stderr: %s", err, stderr.String())
|
||||
return
|
||||
stderrStr := stderr.String()
|
||||
|
||||
// Check for specific YouTube errors
|
||||
if strings.Contains(stderrStr, "429") || strings.Contains(stderrStr, "Too Many Requests") {
|
||||
err = fmt.Errorf("YouTube rate limit exceeded. Try again later or use different yt-dlp arguments like '--sleep-requests 1' to slow down requests. Error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if strings.Contains(stderrStr, "Sign in to confirm you're not a bot") || strings.Contains(stderrStr, "Use --cookies-from-browser") {
|
||||
err = fmt.Errorf("YouTube requires authentication (bot detection). Use --yt-dlp-args='--cookies-from-browser BROWSER' where BROWSER is chrome, firefox, brave, etc. Error: %v", err)
|
||||
return
|
||||
}
|
||||
|
||||
if language != "" {
|
||||
// Fallback: try without specifying language (let yt-dlp choose best available)
|
||||
stderr.Reset()
|
||||
fallbackArgs := append([]string{}, baseArgs...)
|
||||
|
||||
// Add additional arguments if provided
|
||||
if additionalArgs != "" {
|
||||
additionalArgsList, parseErr := shellquote.Split(additionalArgs)
|
||||
if parseErr != nil {
|
||||
return "", fmt.Errorf("invalid yt-dlp arguments: %v", parseErr)
|
||||
}
|
||||
fallbackArgs = append(fallbackArgs, additionalArgsList...)
|
||||
}
|
||||
|
||||
// Don't specify language, let yt-dlp choose
|
||||
fallbackArgs = append(fallbackArgs, videoURL)
|
||||
cmd = exec.Command("yt-dlp", fallbackArgs...)
|
||||
cmd.Stderr = &stderr
|
||||
if err = cmd.Run(); err != nil {
|
||||
stderrStr2 := stderr.String()
|
||||
if strings.Contains(stderrStr2, "429") || strings.Contains(stderrStr2, "Too Many Requests") {
|
||||
err = fmt.Errorf("YouTube rate limit exceeded. Try again later or use different yt-dlp arguments like '--sleep-requests 1'. Error: %v", err)
|
||||
} else {
|
||||
err = fmt.Errorf("yt-dlp failed with language '%s' and fallback. Original error: %s. Fallback error: %s", language, stderrStr, stderrStr2)
|
||||
}
|
||||
return
|
||||
}
|
||||
} else {
|
||||
err = fmt.Errorf("yt-dlp failed: %v, stderr: %s", err, stderrStr)
|
||||
return
|
||||
}
|
||||
}
|
||||
|
||||
// Find VTT files using cross-platform approach
|
||||
vttFiles, err := o.findVTTFiles(tempDir, language)
|
||||
// Try to find files with the requested language first, but fall back to any VTT file
|
||||
vttFiles, err := o.findVTTFilesWithFallback(tempDir, language)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
@@ -172,12 +257,12 @@ func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, proces
|
||||
return processVTTFileFunc(vttFiles[0])
|
||||
}
|
||||
|
||||
func (o *YouTube) tryMethodYtDlp(videoId string, language string) (ret string, err error) {
|
||||
return o.tryMethodYtDlpInternal(videoId, language, o.readAndCleanVTTFile)
|
||||
func (o *YouTube) tryMethodYtDlp(videoId string, language string, additionalArgs string) (ret string, err error) {
|
||||
return o.tryMethodYtDlpInternal(videoId, language, additionalArgs, o.readAndCleanVTTFile)
|
||||
}
|
||||
|
||||
func (o *YouTube) tryMethodYtDlpWithTimestamps(videoId string, language string) (ret string, err error) {
|
||||
return o.tryMethodYtDlpInternal(videoId, language, o.readAndFormatVTTWithTimestamps)
|
||||
func (o *YouTube) tryMethodYtDlpWithTimestamps(videoId string, language string, additionalArgs string) (ret string, err error) {
|
||||
return o.tryMethodYtDlpInternal(videoId, language, additionalArgs, o.readAndFormatVTTWithTimestamps)
|
||||
}
|
||||
|
||||
func (o *YouTube) readAndCleanVTTFile(filename string) (ret string, err error) {
|
||||
@@ -303,8 +388,7 @@ func isTimeStamp(s string) bool {
|
||||
|
||||
func removeVTTTags(s string) string {
|
||||
// Remove VTT tags like <c.colorE5E5E5>, </c>, etc.
|
||||
tagRegex := regexp.MustCompile(`<[^>]*>`)
|
||||
return tagRegex.ReplaceAllString(s, "")
|
||||
return vttTagRegex.ReplaceAllString(s, "")
|
||||
}
|
||||
|
||||
// shouldIncludeRepeat determines if repeated content should be included based on time gap
|
||||
@@ -428,7 +512,7 @@ func (o *YouTube) GrabDuration(videoId string) (ret int, err error) {
|
||||
|
||||
durationStr := videoResponse.Items[0].ContentDetails.Duration
|
||||
|
||||
matches := regexp.MustCompile(`(?i)PT(?:(\d+)H)?(?:(\d+)M)?(?:(\d+)S)?`).FindStringSubmatch(durationStr)
|
||||
matches := durationRegex.FindStringSubmatch(durationStr)
|
||||
if len(matches) == 0 {
|
||||
return 0, fmt.Errorf("invalid duration string: %s", durationStr)
|
||||
}
|
||||
@@ -588,8 +672,9 @@ func (o *YouTube) normalizeFileName(name string) string {
|
||||
|
||||
}
|
||||
|
||||
// findVTTFiles searches for VTT files in a directory using cross-platform approach
|
||||
func (o *YouTube) findVTTFiles(dir, language string) ([]string, error) {
|
||||
// findVTTFilesWithFallback searches for VTT files, handling fallback scenarios
|
||||
// where the requested language might not be available
|
||||
func (o *YouTube) findVTTFilesWithFallback(dir, requestedLanguage string) ([]string, error) {
|
||||
var vttFiles []string
|
||||
|
||||
// Walk through the directory to find VTT files
|
||||
@@ -612,14 +697,28 @@ func (o *YouTube) findVTTFiles(dir, language string) ([]string, error) {
|
||||
return nil, fmt.Errorf("no VTT files found in directory")
|
||||
}
|
||||
|
||||
// Prefer files with the specified language
|
||||
// If no specific language requested, return the first file
|
||||
if requestedLanguage == "" {
|
||||
return []string{vttFiles[0]}, nil
|
||||
}
|
||||
|
||||
// First, try to find files with the requested language
|
||||
for _, file := range vttFiles {
|
||||
if strings.Contains(file, "."+language+".vtt") {
|
||||
if strings.Contains(file, "."+requestedLanguage+".vtt") {
|
||||
return []string{file}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// Return the first VTT file found if no language-specific file exists
|
||||
// If requested language not found, check if we have any language-specific files
|
||||
// This handles the fallback case where yt-dlp downloaded a different language
|
||||
for _, file := range vttFiles {
|
||||
// Look for any language pattern (e.g., .en.vtt, .es.vtt, etc.)
|
||||
if languageFileRegex.MatchString(file) {
|
||||
return []string{file}, nil
|
||||
}
|
||||
}
|
||||
|
||||
// If no language-specific files found, return the first VTT file
|
||||
return []string{vttFiles[0]}, nil
|
||||
}
|
||||
|
||||
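A short, hedged usage sketch of the new argument pass-through (the import path, video ID, and extra yt-dlp flags below are illustrative assumptions; yt-dlp must be on PATH, and the extra string is tokenized with `shellquote.Split` exactly as in the code above):

```go
package main

import (
	"fmt"
	"log"

	// Import path assumed from the helper's location in this repository.
	"github.com/danielmiessler/fabric/internal/tools/youtube"
)

func main() {
	yt := youtube.NewYouTube()
	// Forward browser cookies to yt-dlp to work around YouTube bot detection.
	transcript, err := yt.GrabTranscriptWithArgs(
		"dQw4w9WgXcQ", // hypothetical video ID
		"en",
		"--cookies-from-browser firefox",
	)
	if err != nil {
		log.Fatalf("transcript: %v", err)
	}
	fmt.Println(transcript)
}
```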
|
||||
@@ -4,9 +4,6 @@ schema = 3
|
||||
[mod."cloud.google.com/go"]
|
||||
version = "v0.121.2"
|
||||
hash = "sha256-BCgGHxKti8slH98UDDurtgzX3lgcYEklsmj4ImPpwlc="
|
||||
[mod."cloud.google.com/go/ai"]
|
||||
version = "v0.12.1"
|
||||
hash = "sha256-wg3oLMS68E/v7EdNzywbjwEmpk+u6U8LTnIc1pq8edo="
|
||||
[mod."cloud.google.com/go/auth"]
|
||||
version = "v0.16.2"
|
||||
hash = "sha256-BAU9WGFKe0pd5Eu3l/Mbts+QeCOjS+lChr5hrPBCzdA="
|
||||
@@ -16,9 +13,6 @@ schema = 3
|
||||
[mod."cloud.google.com/go/compute/metadata"]
|
||||
version = "v0.7.0"
|
||||
hash = "sha256-jJZDW+hibqjMiY8OiJhgJALbGwEq+djLOxfYR7upQyE="
|
||||
[mod."cloud.google.com/go/longrunning"]
|
||||
version = "v0.6.7"
|
||||
hash = "sha256-9I0Nc2KWAEVoxDngNkqFUdASmZIAySfMEELlPh3Q3xA="
|
||||
[mod."dario.cat/mergo"]
|
||||
version = "v1.0.2"
|
||||
hash = "sha256-p6jdiHlLEfZES8vJnDywG4aVzIe16p0CU6iglglIweA="
|
||||
@@ -32,8 +26,8 @@ schema = 3
|
||||
version = "v1.3.3"
|
||||
hash = "sha256-jv7ZshpSd7FZzKKN6hqlUgiR8C3y85zNIS/hq7g76Ho="
|
||||
[mod."github.com/anthropics/anthropic-sdk-go"]
|
||||
version = "v1.4.0"
|
||||
hash = "sha256-4kwFw9gt/sRIlTo0fC2PbfLnCyc4lCOtmfQelhpORX8="
|
||||
version = "v1.9.1"
|
||||
hash = "sha256-1saDnM1DMnDLHT4RoA/EFuOvW7CIFh2tkfOJ1/+itNc="
|
||||
[mod."github.com/araddon/dateparse"]
|
||||
version = "v0.0.0-20210429162001-6b43995a97de"
|
||||
hash = "sha256-UuX84naeRGMsFOgIgRoBHG5sNy1CzBkWPKmd6VbLwFw="
|
||||
@@ -163,9 +157,9 @@ schema = 3
|
||||
[mod."github.com/golang/groupcache"]
|
||||
version = "v0.0.0-20241129210726-2c02b8208cf8"
|
||||
hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
|
||||
[mod."github.com/google/generative-ai-go"]
|
||||
version = "v0.20.1"
|
||||
hash = "sha256-9bSpEs4kByhgyTKiHdOY5muYjGBTluA1LvEjw2gSoLI="
|
||||
[mod."github.com/google/go-cmp"]
|
||||
version = "v0.7.0"
|
||||
hash = "sha256-JbxZFBFGCh/Rj5XZ1vG94V2x7c18L8XKB0N9ZD5F2rM="
|
||||
[mod."github.com/google/go-github/v66"]
|
||||
version = "v66.0.0"
|
||||
hash = "sha256-o4usfbApXwTuwIFMECagJwK2H4UMJbCpdyGdWZ5VUpI="
|
||||
@@ -184,6 +178,9 @@ schema = 3
|
||||
[mod."github.com/googleapis/gax-go/v2"]
|
||||
version = "v2.14.2"
|
||||
hash = "sha256-QyY7wuCkrOJCJIf9Q884KD/BC3vk/QtQLXeLeNPt750="
|
||||
[mod."github.com/gorilla/websocket"]
|
||||
version = "v1.5.3"
|
||||
hash = "sha256-vTIGEFMEi+30ZdO6ffMNJ/kId6pZs5bbyqov8xe9BM0="
|
||||
[mod."github.com/hasura/go-graphql-client"]
|
||||
version = "v0.14.4"
|
||||
hash = "sha256-TBNYIfC2CI0cVu7aZcHSWc6ZkgdkWSSfoCXqoAJT8jw="
|
||||
@@ -202,6 +199,9 @@ schema = 3
|
||||
[mod."github.com/json-iterator/go"]
|
||||
version = "v1.1.12"
|
||||
hash = "sha256-To8A0h+lbfZ/6zM+2PpRpY3+L6725OPC66lffq6fUoM="
|
||||
[mod."github.com/kballard/go-shellquote"]
|
||||
version = "v0.0.0-20180428030007-95032a82bc51"
|
||||
hash = "sha256-AOEdKETBMUC39ln6jBJ9NYdJWp++jV5lSbjNqG3dV+c="
|
||||
[mod."github.com/kevinburke/ssh_config"]
|
||||
version = "v1.2.0"
|
||||
hash = "sha256-Ta7ZOmyX8gG5tzWbY2oES70EJPfI90U7CIJS9EAce0s="
|
||||
@@ -292,9 +292,6 @@ schema = 3
|
||||
[mod."go.opentelemetry.io/auto/sdk"]
|
||||
version = "v1.1.0"
|
||||
hash = "sha256-cA9qCCu8P1NSJRxgmpfkfa5rKyn9X+Y/9FSmSd5xjyo="
|
||||
[mod."go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"]
|
||||
version = "v0.61.0"
|
||||
hash = "sha256-o5w9k3VbqP3gaXI3Aelw93LLHH53U4PnkYVwc3MaY3Y="
|
||||
[mod."go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"]
|
||||
version = "v0.61.0"
|
||||
hash = "sha256-4pfXD7ErXhexSynXiEEQSAkWoPwHd7PEDE3M1Zi5gLM="
|
||||
@@ -311,8 +308,8 @@ schema = 3
|
||||
version = "v0.18.0"
|
||||
hash = "sha256-tUpUPERjmRi7zldj0oPlnbnBhEkcI9iQGvP1HqlsK10="
|
||||
[mod."golang.org/x/crypto"]
|
||||
version = "v0.39.0"
|
||||
hash = "sha256-FtwjbVoAhZkx7F2hmzi9Y0J87CVVhWcrZzun+zWQLzc="
|
||||
version = "v0.40.0"
|
||||
hash = "sha256-I6p2fqvz63P9MwAuoQrljI7IUbfZQvCem0ii4Q2zZng="
|
||||
[mod."golang.org/x/exp"]
|
||||
version = "v0.0.0-20250531010427-b6e5de432a8b"
|
||||
hash = "sha256-QaFfjyB+pogCkUkJskR9xnXwkCOU828XJRrzwwLm6Ms="
|
||||
@@ -331,12 +328,12 @@ schema = 3
|
||||
[mod."golang.org/x/text"]
|
||||
version = "v0.27.0"
|
||||
hash = "sha256-VX0rOh6L3qIvquKSGjfZQFU8URNtGvkNvxE7OZtboW8="
|
||||
[mod."golang.org/x/time"]
|
||||
version = "v0.12.0"
|
||||
hash = "sha256-Cp3oxrCMH2wyxjzr5SHVmyhgaoUuSl56Uy00Q7DYEpw="
|
||||
[mod."google.golang.org/api"]
|
||||
version = "v0.236.0"
|
||||
hash = "sha256-tP1RSUSnQ4a0axgZQwEZgKF1E13nL02FSP1NPSZr0Rc="
|
||||
[mod."google.golang.org/genai"]
|
||||
version = "v1.17.0"
|
||||
hash = "sha256-Iw09DYpWuGR8E++dsFCBs702oKJPZLBEEGv0g4a4AhA="
|
||||
[mod."google.golang.org/genproto/googleapis/api"]
|
||||
version = "v0.0.0-20250603155806-513f23925822"
|
||||
hash = "sha256-0CS432v9zVhkVLqFpZtxBX8rvVqP67lb7qQ3es7RqIU="
|
||||
|
||||
@@ -1 +1 @@
|
||||
"1.4.259"
|
||||
"1.4.287"
|
||||
|
||||
116
scripts/docker-test/README.md
Normal file
@@ -0,0 +1,116 @@
|
||||
# Docker Test Environment for API Configuration Fix
|
||||
|
||||
This directory contains a Docker-based testing setup for the fix to the issue where Fabric calls the Ollama and Bedrock APIs even when they are not configured, which caused unconfigured services to show error messages during model listing.
|
||||
|
||||
## Quick Start
|
||||
|
||||
```bash
|
||||
# Run all tests
|
||||
./scripts/docker-test/test-runner.sh
|
||||
|
||||
# Interactive mode - pick which test to run
|
||||
./scripts/docker-test/test-runner.sh -i
|
||||
|
||||
# Run specific test case
|
||||
./scripts/docker-test/test-runner.sh gemini-only
|
||||
|
||||
# Shell into test environment
|
||||
./scripts/docker-test/test-runner.sh -s gemini-only
|
||||
|
||||
# Build image only (for development)
|
||||
./scripts/docker-test/test-runner.sh -b
|
||||
|
||||
# Show help
|
||||
./scripts/docker-test/test-runner.sh -h
|
||||
```
|
||||
|
||||
## Test Cases
|
||||
|
||||
1. **no-config**: No APIs configured
|
||||
2. **gemini-only**: Only Gemini configured (reproduces original issue #1195)
|
||||
3. **openai-only**: Only OpenAI configured
|
||||
4. **ollama-only**: Only Ollama configured
|
||||
5. **bedrock-only**: Only Bedrock configured
|
||||
6. **mixed**: Multiple APIs configured (Gemini + OpenAI + Ollama)
|
||||
|
||||
## Environment Files
|
||||
|
||||
Each test case has a corresponding environment file in `scripts/docker-test/env/`:
|
||||
|
||||
- `env.no-config` - Empty configuration
|
||||
- `env.gemini-only` - Only Gemini API key
|
||||
- `env.openai-only` - Only OpenAI API key
|
||||
- `env.ollama-only` - Only Ollama URL
|
||||
- `env.bedrock-only` - Only Bedrock configuration
|
||||
- `env.mixed` - Multiple API configurations
|
||||
|
||||
These files are volume-mounted into the Docker container and persist changes made with `fabric -S`.
|
||||
|
||||
## Interactive Mode & Shell Access
|
||||
|
||||
The interactive mode (`-i`) provides several options:
|
||||
|
||||
```text
|
||||
Available test cases:
|
||||
|
||||
1) No APIs configured (no-config)
|
||||
2) Only Gemini configured (gemini-only)
|
||||
3) Only OpenAI configured (openai-only)
|
||||
4) Only Ollama configured (ollama-only)
|
||||
5) Only Bedrock configured (bedrock-only)
|
||||
6) Mixed configuration (mixed)
|
||||
7) Run all tests
|
||||
0) Exit
|
||||
|
||||
Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)
|
||||
```
|
||||
|
||||
### Shell Mode
|
||||
|
||||
- Use `1!`, `2!`, etc. to shell into any test environment
|
||||
- Run `fabric -S` to configure APIs interactively
|
||||
- Run `fabric --listmodels` or `fabric -L` to test model listing
|
||||
- Changes persist in the environment files
|
||||
- Type `exit` to return to test runner
|
||||
|
||||
## Expected Results
|
||||
|
||||
**Before Fix:**
|
||||
|
||||
- `no-config` and `gemini-only` tests show Ollama connection errors
|
||||
- Tests show Bedrock authentication errors when `BEDROCK_AWS_REGION` is not set
|
||||
- Error: `Ollama Get "http://localhost:11434/api/tags": dial tcp...`
|
||||
- Error: `Bedrock failed to list foundation models...`
|
||||
|
||||
**After Fix:**
|
||||
|
||||
- Clean output with no error messages for unconfigured services
|
||||
- Only configured services appear in model listings
|
||||
- Ollama only initialized when `OLLAMA_API_URL` is set
|
||||
- Bedrock only initialized when `BEDROCK_AWS_REGION` is set
|
||||
|
||||
## Implementation Details
|
||||
|
||||
- **Volume-mounted configs**: Environment files are mounted to `/home/testuser/.config/fabric/.env`
|
||||
- **Persistent state**: Configuration changes survive between test runs
|
||||
- **Single Docker image**: Built once from `scripts/docker-test/base/Dockerfile`, reused for all tests
|
||||
- **Isolated environments**: Each test uses its own environment file
|
||||
- **Cross-platform**: Works on macOS, Linux, and Windows with Docker
|
||||
|
||||
## Development Workflow
|
||||
|
||||
1. Make code changes to fix API initialization logic
|
||||
2. Run `./scripts/docker-test/test-runner.sh no-config` to test the main issue
|
||||
3. Use `./scripts/docker-test/test-runner.sh -i` for interactive testing
|
||||
4. Shell into environments (`1!`, `2!`, etc.) to debug specific configurations
|
||||
5. Run all tests before submitting PR: `./scripts/docker-test/test-runner.sh`
|
||||
|
||||
## Architecture
|
||||
|
||||
The fix involves:
|
||||
|
||||
1. **Ollama**: Override `IsConfigured()` method to check for `OLLAMA_API_URL` env var
|
||||
2. **Bedrock**: Modify `hasAWSCredentials()` to require `BEDROCK_AWS_REGION`
|
||||
3. **Plugin Registry**: Only initialize providers when properly configured
|
||||
|
||||
This prevents unnecessary API calls and eliminates confusing error messages for users.
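
As a rough illustration of this registry behavior, here is a minimal sketch under assumptions — the interface and the stub below are simplified stand-ins, not Fabric's actual plugin types:

```go
package main

import "fmt"

// provider is a simplified stand-in for a vendor plugin.
type provider interface {
	Name() string
	IsConfigured() bool
}

// register keeps only providers that report themselves as configured,
// mirroring how Ollama's IsConfigured() checks OLLAMA_API_URL.
func register(candidates ...provider) (active []provider) {
	for _, p := range candidates {
		if p.IsConfigured() {
			active = append(active, p)
		}
	}
	return active
}

// ollamaStub mimics a provider that is configured only when a URL is set.
type ollamaStub struct{ url string }

func (o ollamaStub) Name() string       { return "Ollama" }
func (o ollamaStub) IsConfigured() bool { return o.url != "" }

func main() {
	// With an empty URL nothing is registered, so nothing is ever queried.
	for _, p := range register(ollamaStub{url: ""}) {
		fmt.Println("configured:", p.Name())
	}
}
```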
|
||||
30
scripts/docker-test/base/Dockerfile
Normal file
@@ -0,0 +1,30 @@
|
||||
FROM golang:1.24-alpine AS builder
|
||||
|
||||
WORKDIR /app
|
||||
COPY go.mod go.sum ./
|
||||
RUN go mod download
|
||||
|
||||
COPY ./cmd/fabric ./cmd/fabric
|
||||
COPY ./internal ./internal
|
||||
RUN go build -o fabric ./cmd/fabric
|
||||
|
||||
FROM alpine:latest
|
||||
RUN apk --no-cache add ca-certificates
|
||||
|
||||
# Create a test user
|
||||
RUN adduser -D -s /bin/sh testuser
|
||||
|
||||
# Switch to test user
|
||||
USER testuser
|
||||
WORKDIR /home/testuser
|
||||
|
||||
# Set environment variables for the test user
|
||||
ENV HOME=/home/testuser
|
||||
ENV USER=testuser
|
||||
|
||||
COPY --from=builder /app/fabric .
|
||||
|
||||
# Create fabric config directory and empty .env file
|
||||
RUN mkdir -p .config/fabric && touch .config/fabric/.env
|
||||
|
||||
ENTRYPOINT ["./fabric"]
|
||||
235
scripts/docker-test/test-runner.sh
Executable file
@@ -0,0 +1,235 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# Get the directory where this script is located
|
||||
top_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
base_name="$(basename "$top_dir")"
|
||||
cd "$top_dir"/../.. || exit 1
|
||||
|
||||
# Check if bash version supports associative arrays
|
||||
if [[ ${BASH_VERSION%%.*} -lt 4 ]]; then
|
||||
echo "This script requires bash 4.0 or later for associative arrays."
|
||||
echo "Current version: $BASH_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IMAGE_NAME="fabric-test-setup"
|
||||
ENV_DIR="scripts/${base_name}/env"
|
||||
|
||||
# Test case descriptions
|
||||
declare -A test_descriptions=(
|
||||
["no-config"]="No APIs configured"
|
||||
["gemini-only"]="Only Gemini configured (reproduces original issue)"
|
||||
["openai-only"]="Only OpenAI configured"
|
||||
["ollama-only"]="Only Ollama configured"
|
||||
["bedrock-only"]="Only Bedrock configured"
|
||||
["mixed"]="Mixed configuration (Gemini + OpenAI + Ollama)"
|
||||
)
|
||||
|
||||
# Test case order for consistent display
|
||||
test_order=("no-config" "gemini-only" "openai-only" "ollama-only" "bedrock-only" "mixed")
|
||||
|
||||
build_image() {
|
||||
echo "=== Building Docker image ==="
|
||||
docker build -f "${top_dir}/base/Dockerfile" -t "$IMAGE_NAME" .
|
||||
echo
|
||||
}
|
||||
|
||||
check_env_file() {
|
||||
local test_name="$1"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
if [[ ! -f "$env_file" ]]; then
|
||||
echo "Error: Environment file not found: $env_file"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
run_test() {
|
||||
local test_name="$1"
|
||||
local description="${test_descriptions[$test_name]}"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
check_env_file "$test_name"
|
||||
|
||||
echo "===================="
|
||||
echo "Test: $description"
|
||||
echo "Config: $test_name"
|
||||
echo "Env file: $env_file"
|
||||
echo "===================="
|
||||
|
||||
echo "Running test..."
|
||||
if docker run --rm \
|
||||
-e HOME=/home/testuser \
|
||||
-e USER=testuser \
|
||||
-v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env:ro" \
|
||||
"$IMAGE_NAME" --listmodels 2>&1; then
|
||||
echo "✅ Test completed"
|
||||
else
|
||||
echo "❌ Test failed"
|
||||
fi
|
||||
echo
|
||||
}
|
||||
|
||||
shell_into_env() {
|
||||
local test_name="$1"
|
||||
local description="${test_descriptions[$test_name]}"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
check_env_file "$test_name"
|
||||
|
||||
echo "===================="
|
||||
echo "Shelling into: $description"
|
||||
echo "Config: $test_name"
|
||||
echo "Env file: $env_file"
|
||||
echo "===================="
|
||||
echo "You can now run 'fabric -S' to configure, or 'fabric --listmodels' or 'fabric -L' to test."
|
||||
echo "Changes to .env will persist in $env_file"
|
||||
echo "Type 'exit' to return to the test runner."
|
||||
echo
|
||||
|
||||
docker run -it --rm \
|
||||
-e HOME=/home/testuser \
|
||||
-e USER=testuser \
|
||||
-v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env" \
|
||||
--entrypoint=/bin/sh \
|
||||
"$IMAGE_NAME"
|
||||
}
|
||||
|
||||
interactive_mode() {
|
||||
echo "=== Interactive Mode ==="
|
||||
echo "Available test cases:"
|
||||
echo
|
||||
local i=1
|
||||
local cases=()
|
||||
for test_name in "${test_order[@]}"; do
|
||||
echo "$i) ${test_descriptions[$test_name]} ($test_name)"
|
||||
cases[i]="$test_name"
|
||||
((i++))
|
||||
done
|
||||
echo "$i) Run all tests"
|
||||
echo "0) Exit"
|
||||
echo
|
||||
echo "Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)"
|
||||
echo
|
||||
|
||||
while true; do
|
||||
read -r -p "Select test case (0-$i) [or 1!, etc. to shell into test environment]: " choice
|
||||
|
||||
# Check for shell mode (! suffix)
|
||||
local shell_mode=false
|
||||
if [[ "$choice" == *"!" ]]; then
|
||||
shell_mode=true
|
||||
choice="${choice%!}" # Remove the ! suffix
|
||||
fi
|
||||
|
||||
if [[ "$choice" == "0" ]]; then
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Cannot shell into exit option."
|
||||
continue
|
||||
fi
|
||||
echo "Exiting..."
|
||||
exit 0
|
||||
elif [[ "$choice" == "$i" ]]; then
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Cannot shell into 'run all tests' option."
|
||||
continue
|
||||
fi
|
||||
echo "Running all tests..."
|
||||
run_all_tests
|
||||
break
|
||||
elif [[ "$choice" -ge 1 && "$choice" -lt "$i" ]]; then
|
||||
local selected_test="${cases[$choice]}"
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Shelling into: ${test_descriptions[$selected_test]}"
|
||||
shell_into_env "$selected_test"
|
||||
else
|
||||
echo "Running: ${test_descriptions[$selected_test]}"
|
||||
run_test "$selected_test"
|
||||
fi
|
||||
|
||||
read -r -p "Continue testing? (y/n): " again
|
||||
if [[ "$again" != "y" && "$again" != "Y" ]]; then
|
||||
break
|
||||
fi
|
||||
echo
|
||||
else
|
||||
echo "Invalid choice. Please select 0-$i (optionally with '!' for shell mode)."
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
run_all_tests() {
|
||||
echo "=== Testing PR #1645: Conditional API initialization ==="
|
||||
echo
|
||||
|
||||
for test_name in "${test_order[@]}"; do
|
||||
run_test "$test_name"
|
||||
done
|
||||
|
||||
echo "=== Test run complete ==="
|
||||
echo "Review the output above to check:"
|
||||
echo "1. No Ollama connection errors when OLLAMA_URL not set"
|
||||
echo "2. No Bedrock authentication errors when BEDROCK_AWS_REGION not set"
|
||||
echo "3. Only configured services appear in model listings"
|
||||
}
|
||||
|
||||
show_help() {
|
||||
echo "Usage: $0 [OPTIONS] [TEST_CASE]"
|
||||
echo
|
||||
echo "Test PR #1645 conditional API initialization"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -h, --help Show this help message"
|
||||
echo " -i, --interactive Run in interactive mode"
|
||||
echo " -b, --build-only Build image only, don't run tests"
|
||||
echo " -s, --shell TEST Shell into test environment"
|
||||
echo
|
||||
echo "Test cases:"
|
||||
for test_name in "${test_order[@]}"; do
|
||||
echo " $test_name: ${test_descriptions[$test_name]}"
|
||||
done
|
||||
echo
|
||||
echo "Examples:"
|
||||
echo " $0 # Run all tests"
|
||||
echo " $0 -i # Interactive mode"
|
||||
echo " $0 gemini-only # Run specific test"
|
||||
echo " $0 -s gemini-only # Shell into gemini-only environment"
|
||||
echo " $0 -b # Build image only"
|
||||
echo
|
||||
echo "Environment files are located in $ENV_DIR/ and can be edited directly."
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
if [[ $# -eq 0 ]]; then
|
||||
build_image
|
||||
run_all_tests
|
||||
elif [[ "$1" == "-h" || "$1" == "--help" ]]; then
|
||||
show_help
|
||||
elif [[ "$1" == "-i" || "$1" == "--interactive" ]]; then
|
||||
build_image
|
||||
interactive_mode
|
||||
elif [[ "$1" == "-b" || "$1" == "--build-only" ]]; then
|
||||
build_image
|
||||
elif [[ "$1" == "-s" || "$1" == "--shell" ]]; then
|
||||
if [[ -z "$2" ]]; then
|
||||
echo "Error: -s/--shell requires a test case name"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${test_descriptions[$2]}" ]]; then
|
||||
echo "Error: Unknown test case: $2"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
build_image
|
||||
shell_into_env "$2"
|
||||
elif [[ -n "${test_descriptions[$1]}" ]]; then
|
||||
build_image
|
||||
run_test "$1"
|
||||
else
|
||||
echo "Unknown test case or option: $1"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
99
scripts/readme_updates/README.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# README Update Scripts
|
||||
|
||||
This directory contains automation scripts for updating the main README.md file with release information from the changelog database.
|
||||
|
||||
## `update_readme_features.py`
|
||||
|
||||
A Python script that generates the "Recent Major Features" section for the README by extracting and filtering release information from the changelog SQLite database.
|
||||
|
||||
### Usage
|
||||
|
||||
```bash
|
||||
# Generate the Recent Major Features section with default limit (20 releases)
|
||||
python scripts/readme_updates/update_readme_features.py
|
||||
|
||||
# Specify a custom limit
|
||||
python scripts/readme_updates/update_readme_features.py --limit 15
|
||||
|
||||
# Use a custom database path
|
||||
python scripts/readme_updates/update_readme_features.py --db /path/to/changelog.db
|
||||
```
|
||||
|
||||
### How It Works
|
||||
|
||||
1. **Database Connection**: Connects to `cmd/generate_changelog/changelog.db` (or custom path)
|
||||
2. **Data Extraction**: Queries the `versions` table for release information
|
||||
3. **Feature Filtering**: Uses heuristics to identify feature/improvement releases
|
||||
4. **Markdown Generation**: Formats output to match README style
|
||||
|
||||
### Feature Detection Heuristics
|
||||
|
||||
The script uses keyword-based heuristics to filter releases:
|
||||
|
||||
#### Include Keywords (Features/Improvements)
|
||||
- new, feature, feat, add, introduce, enable, support
|
||||
- improve, enhance, performance, speed
|
||||
- option, flag, argument, parameter
|
||||
- integration, provider, search, tts, audio, model
|
||||
- cli, ui, web, oauth, sync, database
|
||||
- notifications, desktop, reasoning, thinking
|
||||
|
||||
#### Exclude Keywords (Non-Features)
|
||||
- fix, bug, hotfix
|
||||
- ci, cd, pipeline, chore
|
||||
- docs, readme, refactor, style, typo
|
||||
- test, bump, deps, dependency
|
||||
- merge, revert, format, lint, build
|
||||
- release, prepare, coverage, security
|
||||
|
||||
### Integration with README
|
||||
|
||||
To update the README with new release features:
|
||||
|
||||
```bash
|
||||
# Generate the features and save to a temporary file
|
||||
python scripts/readme_updates/update_readme_features.py --limit 20 > /tmp/recent_features.md
|
||||
|
||||
# Manually replace the "### Recent Major Features" section in README.md
|
||||
# with the generated content
|
||||
```
|
||||
|
||||
### Database Schema
|
||||
|
||||
The script expects the following SQLite table structure:
|
||||
|
||||
```sql
|
||||
CREATE TABLE versions (
|
||||
name TEXT PRIMARY KEY,
|
||||
date DATETIME,
|
||||
commit_sha TEXT,
|
||||
pr_numbers TEXT,
|
||||
ai_summary TEXT,
|
||||
created_at DATETIME DEFAULT CURRENT_TIMESTAMP
|
||||
);
|
||||
```
|
||||
|
||||
### Date Format Support
|
||||
|
||||
The script can parse various date formats:
|
||||
- ISO 8601 with timezone: `2025-08-14 14:11:04+00:00`
|
||||
- ISO 8601 basic: `2025-08-14T14:11:04`
|
||||
- Date only: `2025-08-14`
|
||||
- US format: `08/14/2025`
|
||||
|
||||
Output format is standardized to: `Aug 14, 2025`
|
||||
|
||||
### Maintenance Notes
|
||||
|
||||
- **AI Summary Format Changes**: If the format of AI summaries changes, update the `extract_title_desc()` and `split_summary()` functions
|
||||
- **Keyword Tuning**: Adjust `INCLUDE_RE` and `EXCLUDE_RE` patterns as needed
|
||||
- **Title Extraction**: The script attempts to extract concise titles from feature descriptions
|
||||
- **Description Length**: Descriptions are limited to 200 characters for readability
|
||||
|
||||
### Future Enhancements
|
||||
|
||||
Potential improvements for automated README updates:
|
||||
- Add section delimiter markers in README for automated replacement
|
||||
- Create a GitHub Action to run on new releases
|
||||
- Add support for categorizing features by type
|
||||
- Implement confidence scoring for feature detection
|
||||
281
scripts/readme_updates/update_readme_features.py
Executable file
@@ -0,0 +1,281 @@
|
||||
#!/usr/bin/env python3
|
||||
"""
|
||||
Generate the '### Recent Major Features' markdown section for README from the changelog SQLite DB.
|
||||
|
||||
- Connects to cmd/generate_changelog/changelog.db
|
||||
- Extracts version, date, and AI summaries from the 'versions' table
|
||||
- Heuristically filters for feature/improvement items (excludes CI/CD/docs/bug fixes)
|
||||
- Formats output to match README style:
|
||||
- [vX.Y.Z](https://github.com/danielmiessler/fabric/releases/tag/vX.Y.Z) (Aug 14, 2025) — **Feature Name**: Short description
|
||||
|
||||
Usage:
|
||||
python scripts/readme_updates/update_readme_features.py --limit 20
|
||||
"""
|
||||
|
||||
import argparse
|
||||
import sqlite3
|
||||
from pathlib import Path
|
||||
from datetime import datetime
|
||||
import re
|
||||
import sys
|
||||
from typing import List, Optional, Tuple
|
||||
|
||||
# Heuristics for filtering feature-related lines
|
||||
EXCLUDE_RE = re.compile(
|
||||
r"(?i)\b(fix|bug|hotfix|ci|cd|pipeline|chore|docs|doc|readme|refactor|style|typo|"
|
||||
"test|tests|bump|deps|dependency|merge|revert|format|lint|build|release\b|prepare|"
|
||||
"codeowners|coverage|security)\b"
|
||||
)
|
||||
INCLUDE_RE = re.compile(
|
||||
r"(?i)\b(new|feature|feat|add|added|introduce|enable|support|improve|enhance|"
|
||||
"performance|speed|option|flag|argument|parameter|integration|provider|search|tts|"
|
||||
"audio|model|cli|ui|web|oauth|sync|database|notifications|desktop|reasoning|thinking)\b"
|
||||
)
|
||||
|
||||
|
||||
def parse_args():
|
||||
"""Parse command-line arguments."""
|
||||
p = argparse.ArgumentParser(
|
||||
description="Generate README 'Recent Major Features' markdown from changelog DB."
|
||||
)
|
||||
p.add_argument(
|
||||
"--limit", type=int, default=20, help="Maximum number of releases to include."
|
||||
)
|
||||
p.add_argument(
|
||||
"--db",
|
||||
type=str,
|
||||
default=None,
|
||||
help="Optional path to changelog.db (defaults to repo cmd/generate_changelog/changelog.db)",
|
||||
)
|
||||
return p.parse_args()
|
||||
|
||||
|
||||
def repo_root() -> Path:
|
||||
"""Get the repository root directory."""
|
||||
# scripts/readme_updates/update_readme_features.py -> repo root is parent.parent.parent
|
||||
return Path(__file__).resolve().parent.parent.parent
|
||||
|
||||
|
||||
def db_path(args) -> Path:
|
||||
"""Determine the database path."""
|
||||
if args.db:
|
||||
return Path(args.db).expanduser().resolve()
|
||||
return repo_root() / "cmd" / "generate_changelog" / "changelog.db"
|
||||
|
||||
|
||||
def connect(dbfile: Path):
|
||||
"""Connect to the SQLite database."""
|
||||
if not dbfile.exists():
|
||||
print(f"ERROR: changelog database not found: {dbfile}", file=sys.stderr)
|
||||
sys.exit(1)
|
||||
return sqlite3.connect(str(dbfile))
|
||||
|
||||
|
||||
def normalize_version(name: str) -> str:
|
||||
"""Ensure version string starts with 'v'."""
|
||||
n = str(name).strip()
|
||||
return n if n.startswith("v") else f"v{n}"
|
||||
|
||||
|
||||
def parse_date(value) -> str:
|
||||
"""Parse various date formats and return formatted string."""
|
||||
if value is None:
|
||||
return "(Unknown date)"
|
||||
|
||||
# Handle the ISO format with timezone from the database
|
||||
s = str(value).strip()
|
||||
|
||||
# Try to parse the ISO format with timezone
|
||||
if "+" in s or "T" in s:
|
||||
# Remove timezone info and microseconds for simpler parsing
|
||||
s_clean = s.split("+")[0].split(".")[0]
|
||||
try:
|
||||
dt = datetime.strptime(s_clean, "%Y-%m-%d %H:%M:%S")
|
||||
return dt.strftime("%b %d, %Y").replace(" 0", " ")
|
||||
except ValueError:
|
||||
pass
|
||||
|
||||
# Fallback formats
|
||||
fmts = [
|
||||
"%Y-%m-%d",
|
||||
"%Y-%m-%d %H:%M:%S",
|
||||
"%Y-%m-%dT%H:%M:%S",
|
||||
"%Y/%m/%d",
|
||||
"%m/%d/%Y",
|
||||
]
|
||||
|
||||
for fmt in fmts:
|
||||
try:
|
||||
dt = datetime.strptime(s, fmt)
|
||||
return dt.strftime("%b %d, %Y").replace(" 0", " ")
|
||||
except ValueError:
|
||||
continue
|
||||
|
||||
# Return original if we can't parse it
|
||||
return f"({s})"
|
||||
|
||||
|
||||
def split_summary(text: str) -> List[str]:
|
||||
"""Split AI summary into individual lines/bullets."""
|
||||
if not text:
|
||||
return []
|
||||
|
||||
lines = []
|
||||
# Split by newlines first
|
||||
for line in text.split("\n"):
|
||||
line = line.strip()
|
||||
if not line:
|
||||
continue
|
||||
# Remove markdown headers
|
||||
line = re.sub(r"^#+\s+", "", line)
|
||||
# Remove PR links and author info
|
||||
line = re.sub(
|
||||
r"^PR\s+\[#\d+\]\([^)]+\)\s+by\s+\[[^\]]+\]\([^)]+\):\s*", "", line
|
||||
)
|
||||
# Remove bullet points
|
||||
line = re.sub(r"^[-*•]\s+", "", line)
|
||||
if line:
|
||||
lines.append(line)
|
||||
|
||||
return lines
|
||||
|
||||
|
||||
def is_feature_line(line: str) -> bool:
|
||||
"""Check if a line describes a feature/improvement (not a bug fix or CI/CD)."""
|
||||
line_lower = line.lower()
|
||||
|
||||
# Strong exclusions first
|
||||
if any(
|
||||
word in line_lower
|
||||
for word in ["chore:", "fix:", "docs:", "test:", "ci:", "build:", "refactor:"]
|
||||
):
|
||||
return False
|
||||
|
||||
if EXCLUDE_RE.search(line):
|
||||
return False
|
||||
|
||||
return bool(INCLUDE_RE.search(line))
|
||||
|
||||
|
||||
def extract_title_desc(line: str) -> Tuple[str, str]:
|
||||
"""Extract title and description from a feature line."""
|
||||
# Remove any markdown formatting
|
||||
line = re.sub(r"\*\*([^*]+)\*\*", r"\1", line)
|
||||
|
||||
# Look for colon separator first
|
||||
if ":" in line:
|
||||
parts = line.split(":", 1)
|
||||
if len(parts) == 2:
|
||||
title = parts[0].strip()
|
||||
desc = parts[1].strip()
|
||||
|
||||
# Clean up the title
|
||||
title = (
|
||||
title.replace("Introduce ", "")
|
||||
.replace("Enable ", "")
|
||||
.replace("Add ", "")
|
||||
)
|
||||
title = title.replace("Implement ", "").replace("Support ", "")
|
||||
|
||||
# Make title more concise
|
||||
if len(title) > 30:
|
||||
# Try to extract key words
|
||||
key_words = []
|
||||
for word in title.split():
|
||||
if word[0].isupper() or "-" in word or "_" in word:
|
||||
key_words.append(word)
|
||||
if key_words:
|
||||
title = " ".join(key_words[:3])
|
||||
|
||||
return (title, desc)
|
||||
|
||||
# Fallback: use first sentence as description
|
||||
sentences = re.split(r"[.!?]\s+", line)
|
||||
if sentences:
|
||||
desc = sentences[0].strip()
|
||||
# Extract a title from the description
|
||||
if "thinking" in desc.lower():
|
||||
return ("AI Reasoning", desc)
|
||||
elif "token" in desc.lower() and "context" in desc.lower():
|
||||
return ("Extended Context", desc)
|
||||
elif "curl" in desc.lower() or "install" in desc.lower():
|
||||
return ("Easy Setup", desc)
|
||||
elif "vendor" in desc.lower() or "model" in desc.lower():
|
||||
return ("Model Management", desc)
|
||||
elif "notification" in desc.lower():
|
||||
return ("Desktop Notifications", desc)
|
||||
elif "tts" in desc.lower() or "speech" in desc.lower():
|
||||
return ("Text-to-Speech", desc)
|
||||
elif "oauth" in desc.lower() or "auth" in desc.lower():
|
||||
return ("OAuth Auto-Auth", desc)
|
||||
elif "search" in desc.lower() and "web" in desc.lower():
|
||||
return ("Web Search", desc)
|
||||
else:
|
||||
# Generic title from first significant words
|
||||
words = desc.split()[:2]
|
||||
title = " ".join(words)
|
||||
return (title, desc)
|
||||
|
||||
return ("Feature", line)
|
||||
|
||||
|
||||
def pick_feature(ai_summary: str) -> Optional[Tuple[str, str]]:
|
||||
"""Pick the best feature line from the AI summary."""
|
||||
lines = split_summary(ai_summary)
|
||||
|
||||
# Look for the first feature line
|
||||
for line in lines:
|
||||
if is_feature_line(line):
|
||||
title, desc = extract_title_desc(line)
|
||||
# Clean up description - remove redundant info
|
||||
desc = desc[:200] if len(desc) > 200 else desc # Limit length
|
||||
return (title, desc)
|
||||
|
||||
return None
|
||||
|
||||
|
||||
def build_item(
|
||||
version: str, date_str: str, feature_title: str, feature_desc: str
|
||||
) -> str:
|
||||
"""Build a markdown list item for a release."""
|
||||
url = f"https://github.com/danielmiessler/fabric/releases/tag/{version}"
|
||||
return f"- [{version}]({url}) ({date_str}) — **{feature_title}**: {feature_desc}"
|
||||
|
||||
|
||||
def main():
|
||||
"""Main function."""
|
||||
args = parse_args()
|
||||
dbfile = db_path(args)
|
||||
conn = connect(dbfile)
|
||||
cur = conn.cursor()
|
||||
|
||||
# Query the database
|
||||
cur.execute("SELECT name, date, ai_summary FROM versions ORDER BY date DESC")
|
||||
rows = cur.fetchall()
|
||||
|
||||
items = []
|
||||
for name, date, summary in rows:
|
||||
version = normalize_version(name)
|
||||
date_fmt = parse_date(date)
|
||||
feat = pick_feature(summary or "")
|
||||
|
||||
if not feat:
|
||||
continue
|
||||
|
||||
title, desc = feat
|
||||
items.append(build_item(version, date_fmt, title, desc))
|
||||
|
||||
if len(items) >= args.limit:
|
||||
break
|
||||
|
||||
conn.close()
|
||||
|
||||
# Output the markdown
|
||||
print("### Recent Major Features")
|
||||
print()
|
||||
for item in items:
|
||||
print(item)
|
||||
|
||||
|
||||
if __name__ == "__main__":
|
||||
main()
|
@@ -11,7 +11,7 @@ This is a web app for Fabric. It was built using [Svelte][svelte], [SkeletonUI][

The goal of this app is to not only provide a user interface for Fabric, but also an out-of-the-box website for those who want to get started with web development, blogging, or to just have a web interface for fabric. You can use this app as a GUI interface for Fabric, a ready to go blog-site, or a website template for your own projects.




## Installing

@@ -1,6 +1,6 @@
---
title: README
description: fabric is an open-source framework for augmenting humans using AI. It provides a modular framework for solving specific problems using a crowdsourced set of AI prompts that can be used anywhere.
description: fabric is an open-source framework for augmenting humans using AI. It provides a modular framework for solving specific problems using a crowd-sourced set of AI prompts that can be used anywhere.
aliases: Fabric/Docs
date: 2024-1-12
updated: 2024-11-22
@@ -12,7 +12,6 @@ updated: 2024-11-22

# `fabric`


<div class="justify-left flex gap-2">
<img src="https://img.shields.io/github/languages/top/danielmiessler/fabric" alt="Github top language">
<img src="https://img.shields.io/github/last-commit/danielmiessler/fabric" alt="GitHub last commit">
@@ -23,10 +22,10 @@ updated: 2024-11-22
<h4><code>fabric</code> is an open-source framework for augmenting humans using AI.</h4>

[Updates](#updates) •
[What and Why](#whatandwhy) •
[What and Why](#what-and-why) •
[Philosophy](#philosophy) •
[Installation](#Installation) •
[Usage](#Usage) •
[Installation](#installation) •
[Usage](#usage) •
[Examples](#examples) •
[Just Use the Patterns](#just-use-the-patterns) •
[Custom Patterns](#custom-patterns) •
@@ -42,8 +41,8 @@ updated: 2024-11-22
- [`fabric`](#fabric)
- [Navigation](#navigation)
- [Updates](#updates)
- [Intro videos](#intro-videos)
- [What and why](#what-and-why)
- [Intro videos](#intro-videos)
- [Philosophy](#philosophy)
- [Breaking problems into components](#breaking-problems-into-components)
- [Too many prompts](#too-many-prompts)
@@ -65,7 +64,9 @@ updated: 2024-11-22
- [`to_pdf`](#to_pdf)
- [`to_pdf` Installation](#to_pdf-installation)
- [pbpaste](#pbpaste)
- [Web Interface](#Web_Interface)
- [Web Interface](#web-interface)
- [Installing](#installing)
- [Streamlit UI](#streamlit-ui)
- [Meta](#meta)
- [Primary contributors](#primary-contributors)

@@ -76,7 +77,7 @@ updated: 2024-11-22
> [!NOTE]
> November 8, 2024
>
> - **Multimodal Support**: You can now use `-a` (attachment) for Multimodal submissions to OpenAI models that support it. Example: `fabric -a https://path/to/image "Give me a description of this image."`
> - **Multi-modal Support**: You can now use `-a` (attachment) for Multi-modal submissions to OpenAI models that support it. Example: `fabric -a https://path/to/image "Give me a description of this image."`

## What and why

@@ -90,7 +91,7 @@ Fabric was created to address this by enabling everyone to granularly apply AI t

## Intro videos

Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#Installation) below.
Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.

- [Network Chuck](https://www.youtube.com/watch?v=UbDyjIIGaxQ)
- [David Bombal](https://www.youtube.com/watch?v=vF-MQmVxnCs)
@@ -223,7 +224,7 @@ This also creates a `yt` alias that allows you to use `yt https://www.youtube.co

#### Save your files in markdown using aliases

If in addition to the above aliases you would like to have the option to save the output to your favourite markdown note vault like Obsidian then instead of the above add the following to your `.zshrc` or `.bashrc` file:
If in addition to the above aliases you would like to have the option to save the output to your favorite markdown note vault like Obsidian then instead of the above add the following to your `.zshrc` or `.bashrc` file:

```bash
# Define the base directory for Obsidian notes
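# (The README's actual snippet is cut off by this hunk; what follows is a hedged
# sketch, not the original. It assumes an Obsidian vault path and uses fabric's
# -o/--output flag, which appears in the Application Options list further below.)
obsidian_base="$HOME/Documents/Obsidian/Notes"   # assumed vault location

save() {
  # Save fabric output as a markdown note named after the first argument.
  local note="$1"
  shift
  fabric "$@" -o "$obsidian_base/${note}.md"
}

# Example usage: pbpaste | save meeting-notes --pattern summarize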
@@ -281,7 +282,7 @@ go install github.com/danielmiessler/fabric@latest
fabric --setup
```

Then [set your environmental variables](#environmental-variables) as shown above.
Then [set your environmental variables](#environment-variables) as shown above.

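Setting those variables by hand might look like the sketch below; the variable names are assumptions rather than something stated in this diff, and `fabric --setup` will normally prompt for and store them for you:

```bash
# Hedged example only; variable names are assumed.
export OPENAI_API_KEY="your-key-here"
export DEFAULT_MODEL="gpt-4o"
```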
### Upgrading

@@ -324,6 +325,7 @@ Application Options:
-U, --updatepatterns          Update patterns
-c, --copy                    Copy to clipboard
-m, --model=                  Choose model
-V, --vendor=                 Specify vendor for chosen model (e.g., -V "LM Studio" -m openai/gpt-oss-20b)
-o, --output=                 Output to file
--output-session              Output the entire session (also a temporary one) to the output file
-n, --latest=                 Number of latest patterns to list (default: 0)
@@ -375,21 +377,21 @@ Now let's look at some things you can do with Fabric.

1. Run the `summarize` Pattern based on input from `stdin`. In this case, the body of an article.

   ```bash
   pbpaste | fabric --pattern summarize
   ```

2. Run the `analyze_claims` Pattern with the `--stream` option to get immediate and streaming results.

   ```bash
   pbpaste | fabric --stream --pattern analyze_claims
   ```

3. Run the `extract_wisdom` Pattern with the `--stream` option to get immediate and streaming results from any Youtube video (much like in the original introduction video).

   ```bash
   fabric -y "https://youtube.com/watch?v=uXs-zPc63kM" --stream --pattern extract_wisdom
   ```

4. Create patterns- you must create a .md file with the pattern and save it to ~/.config/fabric/patterns/[yourpatternname].

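A minimal sketch of what step 4 can look like in practice; the `my_pattern` name is an example, and the `system.md` layout is an assumption based on how the bundled patterns are organized rather than something stated in this diff:

```bash
# Create a directory for the new pattern and write its prompt as markdown.
mkdir -p ~/.config/fabric/patterns/my_pattern
"$EDITOR" ~/.config/fabric/patterns/my_pattern/system.md

# Then use it like any built-in pattern.
pbpaste | fabric --pattern my_pattern
```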
@@ -414,11 +416,7 @@ You may want to use Fabric to create your own custom Patterns—but not share th

Just make a directory in `~/.config/custompatterns/` (or wherever) and put your `.md` files in there.

When you're ready to use them, copy them into:

```
~/.config/fabric/patterns/
```
When you're ready to use them, copy them into: `~/.config/fabric/patterns/`

You can then use them like any other Patterns, but they won't be public unless you explicitly submit them as Pull Requests to the Fabric project. So don't worry—they're private to you.

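Spelled out as a command, the copy step from the hunk above might look like this (paths taken from the text; adjust the source directory to wherever you keep your drafts):

```bash
# Copy privately drafted patterns into the directory fabric actually reads.
cp -r ~/.config/custompatterns/* ~/.config/fabric/patterns/
```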
@@ -462,7 +460,7 @@ The [examples](#examples) use the macOS program `pbpaste` to paste content from

On Windows, you can use the PowerShell command `Get-Clipboard` from a PowerShell command prompt. If you like, you can also alias it to `pbpaste`. If you are using classic PowerShell, edit the file `~\Documents\WindowsPowerShell\.profile.ps1`, or if you are using PowerShell Core, edit `~\Documents\PowerShell\.profile.ps1` and add the alias,

```
```powershell
Set-Alias pbpaste Get-Clipboard
```

@@ -481,17 +479,19 @@ alias pbpaste='xclip -selection clipboard -o'

## Web Interface

Fabric now includes a built-in web interface that provides a GUI alternative to the command-line interface and an out-of-the-box website for those who want to get started with web development or blogging.
You can use this app as a GUI interface for Fabric, a ready to go blog-site, or a website template for your own projects.

The `web/src/lib/content` directory includes starter `.obsidian/` and `templates/` directories, allowing you to open up the `web/src/lib/content/` directory as an [Obsidian.md](https://obsidian.md) vault. You can place your posts in the posts directory when you're ready to publish.

### Installing

The GUI can be installed by navigating to the `web` directory and using `npm install`, `pnpm install`, or your favorite package manager. Then simply run the development server to start the app.

_You will need to run fabric in a separate terminal with the `fabric --serve` command._

**From the fabric project `web/` directory:**

```shell
npm run dev

@@ -499,7 +499,7 @@ npm run dev

pnpm run dev

## or your equivalent
```

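Putting the installing paragraph above into commands, a typical first run might look like this (two terminals; `pnpm` works the same way if you prefer it):

```bash
# Terminal 1: install dependencies and start the web dev server
cd web
npm install
npm run dev

# Terminal 2: serve the fabric API that the web UI talks to
fabric --serve
```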
### Streamlit UI

@@ -515,10 +515,12 @@ streamlit run streamlit.py
```

The Streamlit UI provides a user-friendly interface for:

- Running and chaining patterns
- Managing pattern outputs
- Creating and editing patterns
- Analyzing pattern results

## Meta

> [!NOTE]
@@ -44,7 +44,7 @@ export default defineConfig({
    '/api': {
      target: FABRIC_BASE_URL,
      changeOrigin: true,
      timeout: 30000,
      timeout: 900000,
      rewrite: (path) => path.replace(/^\/api/, ''),
      configure: (proxy, _options) => {
        proxy.on('error', (err, req, res) => {
@@ -59,7 +59,7 @@ export default defineConfig({
    '^/(patterns|models|sessions)/names': {
      target: FABRIC_BASE_URL,
      changeOrigin: true,
      timeout: 30000,
      timeout: 900000,
      configure: (proxy, _options) => {
        proxy.on('error', (err, req, res) => {
          console.log('proxy error', err);