Mirror of https://github.com/danielmiessler/Fabric.git (synced 2026-01-09 22:38:10 -05:00)
Compare commits
47 Commits
Commit SHAs in this comparison:

65dae9bb85, cbd88f6314, 651c5743f1, a68e63bc49, cab51f06df, 20080fcb78, a46f189def, 3f8ca72010, f58f20bcd0, 70f8c013f3, 8f6e2a3d4a, fad176a0a8, dd213eb965, d205dbcdac, f8ff9129b5, f9d01b5ebb, 2c7f4753a2, 9b261b9adf, a23b6d518f, bc73bdb704, f22c144786, eb759251ad, 19b512c3ab, a4ce90970a, 8d2fda3af9, aa59d58deb, d209ee38c7, c20be027fe, 3ef3509bfd, 7142b020ef, 1b9f07b525, dcfc94ca07, 0e85861a46, 7c5a040287, 08eb48c2e7, e40d4e6623, 51bd1ebadf, d3de731967, 458b0a5e1c, b8f64bd554, 1622a34331, 6b9f4c1fb8, 4d2061a641, 713f6e46fe, efadc81974, ea54f60dcc, 4008125e37
.github/pull_request_template.md (vendored, 3 changed lines)

@@ -1,9 +1,12 @@

## What this Pull Request (PR) does

Please briefly describe what this PR does.

## Related issues

Please reference any open issues this PR relates to in here.
If it closes an issue, type `closes #[ISSUE_NUMBER]`.

## Screenshots

Provide any screenshots you may find relevant to facilitate us understanding your PR.
.github/workflows/ci.yml (vendored, 4 changed lines)

@@ -20,13 +20,13 @@ jobs:
contents: read
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5

- name: Install Nix
uses: DeterminateSystems/nix-installer-action@main

- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version-file: ./go.mod
.github/workflows/patterns.yaml (vendored, 11 changed lines)

@@ -11,22 +11,27 @@ jobs:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0

- name: Verify Changes in Patterns Folder
id: check-changes
run: |
git fetch origin
if git diff --quiet HEAD~1 -- data/patterns; then
echo "No changes detected in patterns folder."
exit 1
echo "changes=false" >> $GITHUB_OUTPUT
else
echo "Changes detected in patterns folder."
echo "changes=true" >> $GITHUB_OUTPUT
fi

- name: Zip the Patterns Folder
if: steps.check-changes.outputs.changes == 'true'
run: zip -r patterns.zip data/patterns/

- name: Upload Patterns Artifact
if: steps.check-changes.outputs.changes == 'true'
uses: actions/upload-artifact@v4
with:
name: patterns
.github/workflows/release.yml (vendored, 142 changed lines)

@@ -15,149 +15,43 @@ jobs:
contents: read
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0

- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version-file: ./go.mod

- name: Run tests
run: go test -v ./...

get_version:
name: Get version
runs-on: ubuntu-latest
outputs:
latest_tag: ${{ steps.get_version.outputs.latest_tag }}
steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Get version from source
id: get_version
shell: bash
run: |
if [ ! -f "nix/pkgs/fabric/version.nix" ]; then
echo "Error: version.nix file not found"
exit 1
fi
version=$(cat nix/pkgs/fabric/version.nix | tr -d '"' | tr -cd '0-9.')
if [ -z "$version" ]; then
echo "Error: version is empty"
exit 1
fi
if ! echo "$version" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+' > /dev/null; then
echo "Error: Invalid version format: $version"
exit 1
fi
echo "latest_tag=v$version" >> $GITHUB_OUTPUT

build:
name: Build binaries for Windows, macOS, and Linux
needs: [test, get_version]
runs-on: ${{ matrix.os }}
permissions:
contents: write
strategy:
matrix:
os: [ubuntu-latest, macos-latest, windows-latest]
arch: [amd64, arm64]
exclude:
- os: windows-latest
arch: arm64

steps:
- name: Checkout code
uses: actions/checkout@v4
with:
fetch-depth: 0

- name: Set up Go
uses: actions/setup-go@v4
with:
go-version-file: ./go.mod

- name: Build binary on Linux and macOS
if: matrix.os != 'windows-latest'
env:
GOOS: ${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}
GOARCH: ${{ matrix.arch }}
run: |
OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
go build -o fabric-${OS_NAME}-${{ matrix.arch }} ./cmd/fabric

- name: Build binary on Windows
if: matrix.os == 'windows-latest'
env:
GOOS: windows
GOARCH: ${{ matrix.arch }}
run: |
go build -o fabric-windows-${{ matrix.arch }}.exe ./cmd/fabric

- name: Upload build artifact
if: matrix.os != 'windows-latest'
uses: actions/upload-artifact@v4
with:
name: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}
path: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}

- name: Upload build artifact
if: matrix.os == 'windows-latest'
uses: actions/upload-artifact@v4
with:
name: fabric-windows-${{ matrix.arch }}.exe
path: fabric-windows-${{ matrix.arch }}.exe

- name: Create release if it doesn't exist
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
if ! gh release view ${{ needs.get_version.outputs.latest_tag }} >/dev/null 2>&1; then
gh release create ${{ needs.get_version.outputs.latest_tag }} --title "Release ${{ needs.get_version.outputs.latest_tag }}" --notes "Automated release for ${{ needs.get_version.outputs.latest_tag }}"
else
echo "Release ${{ needs.get_version.outputs.latest_tag }} already exists."
fi

- name: Upload release artifact
if: matrix.os == 'windows-latest'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-windows-${{ matrix.arch }}.exe

- name: Upload release artifact
if: matrix.os != 'windows-latest'
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-${OS_NAME}-${{ matrix.arch }}

update_release_notes:
needs: [build, get_version]
# only run in main upstream repo
if: ${{ github.repository_owner == 'danielmiessler' }}
name: Build & Release with Goreleaser
needs: [test]
runs-on: ubuntu-latest
permissions:
contents: write
steps:
- name: Checkout code
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0

- name: Set up Go
uses: actions/setup-go@v4
uses: actions/setup-go@v5
with:
go-version-file: ./go.mod

- name: Update release description
- name: Run GoReleaser
uses: goreleaser/goreleaser-action@v6
with:
distribution: goreleaser
args: release --clean
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
go run ./cmd/generate_changelog --sync-db
go run ./cmd/generate_changelog --release ${{ needs.get_version.outputs.latest_tag }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
- name: Update Release Description
run: go run ./cmd/generate_changelog --release ${{ github.event.client_payload.tag || github.ref_name }}
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
@@ -6,12 +6,13 @@ on:
- main # Monitor the main branch
paths-ignore:
- "data/patterns/**"
- "**/*.md"
- "data/strategies/**"
- "cmd/generate_changelog/*.db"
- "cmd/generate_changelog/incoming/*.txt"
- "scripts/pattern_descriptions/*.json"
- "web/static/data/pattern_descriptions.json"
- "**/*.md"
- .vscode/**

permissions:
contents: write # Ensure the workflow has write permissions

@@ -22,12 +23,13 @@ concurrency:

jobs:
update-version:
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
if: >
${{ github.repository_owner == 'danielmiessler' }} &&
github.event_name == 'push' && github.ref == 'refs/heads/main'
runs-on: ubuntu-latest

steps:
- name: Checkout repository
uses: actions/checkout@v4
uses: actions/checkout@v5
with:
fetch-depth: 0

@@ -49,12 +51,13 @@ jobs:
run: |
latest_tag=$(git tag --sort=-creatordate | head -n 1)
echo "Latest tag is: $latest_tag"
echo "tag=$latest_tag" >> $GITHUB_OUTPUT
echo "tag=$latest_tag" >> $GITHUB_ENV # Save the latest tag to environment file

- name: Increment patch version
id: increment_version
run: |
latest_tag=${{ env.tag }}
latest_tag=${{ steps.get_latest_tag.outputs.tag }}
major=$(echo "$latest_tag" | cut -d. -f1 | sed 's/v//')
minor=$(echo "$latest_tag" | cut -d. -f2)
patch=$(echo "$latest_tag" | cut -d. -f3)

@@ -62,19 +65,21 @@ jobs:
new_version="${major}.${minor}.${new_patch}"
new_tag="v${new_version}"
echo "New version is: $new_version"
echo "new_version=$new_version" >> $GITHUB_OUTPUT
echo "new_version=$new_version" >> $GITHUB_ENV # Save the new version to environment file
echo "New tag is: $new_tag"
echo "new_tag=$new_tag" >> $GITHUB_OUTPUT
echo "new_tag=$new_tag" >> $GITHUB_ENV # Save the new tag to environment file

- name: Update version.go file
run: |
echo "package main" > cmd/fabric/version.go
echo "" >> cmd/fabric/version.go
echo "var version = \"${{ env.new_tag }}\"" >> cmd/fabric/version.go
echo "var version = \"${{ steps.increment_version.outputs.new_tag }}\"" >> cmd/fabric/version.go

- name: Update version.nix file
run: |
echo "\"${{ env.new_version }}\"" > nix/pkgs/fabric/version.nix
echo "\"${{ steps.increment_version.outputs.new_version }}\"" > nix/pkgs/fabric/version.nix

- name: Format source code
run: |

@@ -88,7 +93,7 @@ jobs:
env:
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
go run ./cmd/generate_changelog --process-prs ${{ env.new_tag }}
go run ./cmd/generate_changelog --process-prs ${{ steps.increment_version.outputs.new_tag }}
go run ./cmd/generate_changelog --sync-db
- name: Commit changes
run: |

@@ -101,7 +106,7 @@ jobs:
# and removing the incoming/ directory.

if ! git diff --staged --quiet; then
git commit -m "chore(release): Update version to ${{ env.new_tag }}"
git commit -m "chore(release): Update version to ${{ steps.increment_version.outputs.new_tag }}"
else
echo "No changes to commit."
fi

@@ -114,10 +119,10 @@ jobs:

- name: Create a new tag
env:
GITHUB_TOKEN: ${{ secrets.TAG_PAT }}
GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
git tag ${{ env.new_tag }}
git push origin ${{ env.new_tag }} # Push the new tag
git tag ${{ steps.increment_version.outputs.new_tag }}
git push origin ${{ steps.increment_version.outputs.new_tag }} # Push the new tag

- name: Dispatch event to trigger release workflow
env:

@@ -127,4 +132,4 @@ jobs:
-H "Authorization: token $GITHUB_TOKEN" \
-H "Accept: application/vnd.github.v3+json" \
https://api.github.com/repos/${{ github.repository }}/dispatches \
-d '{"event_type": "tag_created", "client_payload": {"tag": "${{ env.new_tag }}"}}'
-d '{"event_type": "tag_created", "client_payload": {"tag": "${{ steps.increment_version.outputs.new_tag }}"}}'
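For a concrete sense of what the increment step above computes, here is a small illustrative walk-through (not taken from the diff; the actual `new_patch` arithmetic sits between the hunks shown) for a latest tag of v1.4.308:

```bash
latest_tag=v1.4.308
major=$(echo "$latest_tag" | cut -d. -f1 | sed 's/v//')   # -> 1
minor=$(echo "$latest_tag" | cut -d. -f2)                 # -> 4
patch=$(echo "$latest_tag" | cut -d. -f3)                 # -> 308
new_patch=$((patch + 1))                                  # -> 309 (assumed increment, not shown in the hunks)
new_version="${major}.${minor}.${new_patch}"              # -> 1.4.309
new_tag="v${new_version}"                                 # -> v1.4.309
echo "$new_tag"
```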
.goreleaser.yaml (new file, 36 lines)

@@ -0,0 +1,36 @@
# Read the documentation at https://goreleaser.com

version: 2

project_name: fabric
before:
hooks:
# You may remove this if you don't use go modules.
- go mod tidy
# you may remove this if you don't need go generate
# - go generate ./...

builds:
- env:
- CGO_ENABLED=0
goos:
- linux
- windows
- darwin
main: ./cmd/fabric
binary: fabric

archives:
- formats: [tar.gz]
# this name template makes the OS and Arch compatible with the results of `uname`.
name_template: >-
{{ .ProjectName }}_
{{- title .Os }}_
{{- if eq .Arch "amd64" }}x86_64
{{- else if eq .Arch "386" }}i386
{{- else }}{{ .Arch }}{{ end }}
{{- if .Arm }}v{{ .Arm }}{{ end }}
# use zip for windows archives
format_overrides:
- goos: windows
formats: [zip]
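For illustration (an inference from the template above, not part of the diff), a linux/amd64 build should end up archived as roughly `fabric_Linux_x86_64.tar.gz`, with the Windows override producing a zip such as `fabric_Windows_x86_64.zip`. A local dry run could be sketched as:

```bash
# Illustrative only: build snapshot archives locally without publishing a release
goreleaser release --snapshot --clean
ls dist/
```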
.vscode/settings.json (vendored, 37 changed lines)

@@ -7,12 +7,14 @@
"Anki", "anthropics", "Aoede", "aplicar", "atotto", "Autonoe", "badfile", "Behrens",
"blindspots", "Bombal", "Buildx", "Callirhoe", "Callirrhoe", "Cerebras",
@@ -25,16 +27,20 @@
"danielmiessler", "davidanson", "Debugf", "debuglog", "dedup", "deepseek", "Despina",
"direnv", "DMARC", "DOCKERHUB", "dryrun", "dsrp", "editability", "Eisler", "elif",
"Elister", "entrada", "envrc", "Erinome", "Errorf",
@@ -55,11 +61,13 @@
"godotenv", "gofmt", "goimports", "golint", "gomod", "gonic", "goopenai", "GOPATH",
"gopkg", "Goreleaser", "GOROOT", "Graphviz", "grokai",
@@ -71,6 +79,7 @@
"Hormozi's", "horts", "HTMLURL", "imagetools", "jaredmontoya", "jessevdk", "Jina",
@@ -82,6 +91,7 @@
"Langdock", "Laomedeia", "ldflags", "legibilidad", "libexec", "libnotify", "listcontexts",
@@ -99,13 +109,18 @@
"mbed", "metacharacters", "Miessler", "modeline", "modelines", "mpga", "nicksnyder",
"nometa", "numpy", "ollama", "ollamaapi", "Omri", "openaiapi", "opencode",
"opencontainers", "openrouter", "Orus", "osascript",
@@ -114,9 +129,11 @@
"pipx", "PKCE", "pkgs", "porque", "presencepenalty", "printcontext", "printsession",
"puede", "Pulcherrima", "pycache", "pyperclip",
@@ -131,7 +148,9 @@
"seaborn", "semgrep", "sess", "sgaunet", "shellquote", "SSEHTTP", "storer",
"Streamlit", "stretchr",
@@ -149,9 +168,12 @@
"unconfigured", "unmarshalling", "updatepatterns", "useb", "USERPROFILE", "videoid",
"webp", "WEBVTT", "winget", "wipecontext", "wipesession", "wireframes",
@@ -162,7 +184,16 @@
"youtu", "YTDLP"
],
"cSpell.ignorePaths": ["go.mod", ".gitignore", "CHANGELOG.md"],
"cSpell.ignorePaths": [
"go.mod",
".gitignore",
"CHANGELOG.md",
"scripts/installer/install.*",
"web/static/data/pattern_descriptions.json",
"scripts/pattern_descriptions/*.json",
"data/patterns/pattern_explanations.md",
"internal/i18n/locales/es.json"
],
"markdownlint.config": {
"MD004": false,
"MD011": false,
@@ -176,10 +207,12 @@
"code", "div", "em", "h", "h4", "img", "module",
"p"
"p",
"sup"
]
},
"MD041": false
CHANGELOG.md (166 changed lines)

@@ -1,5 +1,171 @@
# Changelog

## v1.4.309 (2025-09-09)

### PR [#1756](https://github.com/danielmiessler/Fabric/pull/1756) by [ksylvan](https://github.com/ksylvan): Add Internationalization Support with Custom Help System

- Add comprehensive internationalization support with English and Spanish locales
- Replace hardcoded strings with i18n.T translations and add en and es JSON locale files
- Implement custom translated help system with language detection from CLI args
- Add locale download capability and localize error messages throughout codebase
- Support TTS and notification translations

## v1.4.308 (2025-09-05)

### PR [#1755](https://github.com/danielmiessler/Fabric/pull/1755) by [ksylvan](https://github.com/ksylvan): Add i18n Support for Multi-Language Fabric Experience

- Add Spanish localization support with i18n
- Create contexts and sessions tutorial documentation
- Fix broken Warp sponsorship image URL
- Remove solve_with_cot pattern from codebase
- Update pattern descriptions and explanations

### Direct commits

- Update Warp sponsor section with proper formatting
- Replace with correct div structure and styling
- Use proper Warp image URL from brand assets
- Add 'Special thanks to:' text and platform availability
- Maintains proper spacing and alignment
- Fix unclosed div tag in README causing display issues
- Close the main div container properly after fabric screenshot
- Fix HTML structure that was causing repetitive content display
- Ensure proper markdown rendering on GitHub
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>
- Update Warp sponsor section with new banner and branding
- Replace old banner with new warp-banner-light.png image
- Update styling to use modern p tags with proper centering
- Maintain existing go.warp.dev/fabric redirect URL
- Add descriptive alt text and emphasis text for accessibility
🤖 Generated with [Claude Code](https://claude.ai/code)
Co-Authored-By: Claude <noreply@anthropic.com>

## v1.4.307 (2025-09-01)

### PR [#1745](https://github.com/danielmiessler/Fabric/pull/1745) by [ksylvan](https://github.com/ksylvan): Fabric Installation Improvements and Automated Release Updates

- Streamlined install process with one-line installer scripts and updated documentation
- Added bash installer script for Unix systems
- Added PowerShell installer script for Windows
- Created installer documentation with usage examples
- Simplified README installation with one-line installers

## v1.4.306 (2025-09-01)

### PR [#1742](https://github.com/danielmiessler/Fabric/pull/1742) by [ksylvan](https://github.com/ksylvan): Documentation and Pattern Updates

- Add winget installation method for Windows users
- Include Docker Hub and GHCR image references with docker run examples
- Remove deprecated PowerShell download link and unused show_fabric_options_markmap pattern
- Update suggest_pattern with new AI patterns
- Add personal development patterns for storytelling

## v1.4.305 (2025-08-31)

### PR [#1741](https://github.com/danielmiessler/Fabric/pull/1741) by [ksylvan](https://github.com/ksylvan): CI: Fix Release Description Update

- Fix: update release workflow to support manual dispatch with custom tag
- Support custom tag from client payload in workflow
- Fallback to github.ref_name when no custom tag provided
- Enable manual release triggers with specified tag parameter

## v1.4.304 (2025-08-31)

### PR [#1740](https://github.com/danielmiessler/Fabric/pull/1740) by [ksylvan](https://github.com/ksylvan): Restore our custom Changelog Updates in GitHub Actions

- Add changelog generation step to GitHub release workflow
- Create updateReleaseForRepo helper method for release updates
- Add fork detection logic in UpdateReleaseDescription method
- Implement upstream repository release update for forks
- Enhance error handling with detailed repository context

## v1.4.303 (2025-08-28)

### PR [#1736](https://github.com/danielmiessler/Fabric/pull/1736) by [tonymet](https://github.com/tonymet): Winget Publishing and GoReleaser

- Added GoReleaser support for improved package distribution
- Winget and Docker publishing moved to ksylvan/fabric-packager GitHub repo
- Hardened release pipeline by gating workflows to upstream owner only
- Migrated from custom tokens to built-in GITHUB_TOKEN for enhanced security
- Removed docker-publish-on-tag workflow to reduce duplication and complexity
- Added ARM binary release support with updated documentation

## v1.4.302 (2025-08-28)

### PR [#1737](https://github.com/danielmiessler/Fabric/pull/1737) by [ksylvan](https://github.com/ksylvan) and [OmriH-Elister](https://github.com/OmriH-Elister): Add New Psychological Analysis Patterns + devalue version bump

- Add create_story_about_person system pattern with narrative workflow
- Add heal_person system pattern for compassionate healing plans
- Update pattern_explanations to register new patterns and renumber indices
- Extend pattern_descriptions with entries, tags, and concise descriptions
- Bump devalue dependency from 5.1.1 to 5.3.2

## v1.4.301 (2025-08-28)

### PR [#1735](https://github.com/danielmiessler/Fabric/pull/1735) by [ksylvan](https://github.com/ksylvan): Fix Docker Build Path Configuration

- Fix: update Docker workflow to use specific Dockerfile and monitor markdown file changes
- Add explicit Dockerfile path to Docker build action
- Remove markdown files from workflow paths-ignore filter
- Enable CI triggers for documentation file changes
- Specify Docker build context with custom file location

## v1.4.300 (2025-08-28)

### PR [#1732](https://github.com/danielmiessler/Fabric/pull/1732) by [ksylvan](https://github.com/ksylvan): CI Infra: Changelog Generation Tool + Docker Image Publishing

- Add GitHub Actions workflow to publish Docker images on tags
- Build multi-arch images with Buildx and QEMU across amd64, arm64
- Tag images using semver; push to GHCR and Docker Hub
- Gate patterns workflow steps on detected changes instead of failing
- Auto-detect GitHub owner and repo from git remote URL

## v1.4.299 (2025-08-27)

### PR [#1731](https://github.com/danielmiessler/Fabric/pull/1731) by [ksylvan](https://github.com/ksylvan): chore: upgrade ollama dependency from v0.9.0 to v0.11.7

- Updated ollama package from version 0.9.0 to 0.11.7
- Fixed 8 security vulnerabilities including 5 high-severity CVEs that could cause denial of service attacks
- Patched Ollama server vulnerabilities related to division by zero errors and memory exhaustion
- Resolved security flaws that allowed malicious GGUF model file uploads to crash the server
- Enhanced system stability and security posture through comprehensive dependency upgrade

## v1.4.298 (2025-08-27)

### PR [#1730](https://github.com/danielmiessler/Fabric/pull/1730) by [ksylvan](https://github.com/ksylvan): Modernize Dockerfile with Best Practices Implementation

- Remove docker-test framework and simplify production docker setup by eliminating complex testing infrastructure
- Delete entire docker-test directory including test runner scripts and environment configuration files
- Implement multi-stage build optimization in production Dockerfile to improve build efficiency
- Remove docker-compose.yml and start-docker.sh helper scripts to streamline container workflow
- Update README documentation with cleaner Docker usage instructions and reduced image size benefits

## v1.4.297 (2025-08-26)

### PR [#1729](https://github.com/danielmiessler/Fabric/pull/1729) by [ksylvan](https://github.com/ksylvan): Add GitHub Community Health Documents

- Add CODE_OF_CONDUCT defining respectful, collaborative community behavior
- Add CONTRIBUTING with setup, testing, PR, changelog requirements
- Add SECURITY policy with reporting process and response timelines
- Add SUPPORT guide for bugs, features, discussions, expectations
- Add docs README indexing guides, quick starts, contributor essentials

## v1.4.296 (2025-08-26)

### PR [#1728](https://github.com/danielmiessler/Fabric/pull/1728) by [ksylvan](https://github.com/ksylvan): Refactor Logging System to Use Centralized Debug Logger

- Replace fmt.Fprintf/os.Stderr with centralized debuglog.Log across CLI and add unconditional Log function for important messages
- Improve OAuth flow messaging and token refresh diagnostics with better error handling
- Update tests to capture debuglog output via SetOutput for better test coverage
- Convert Perplexity streaming errors to unified debug logging and emit file write notifications through debuglog
- Standardize extension registry warnings and announce large audio processing steps via centralized logger

## v1.4.295 (2025-08-24)

### PR [#1727](https://github.com/danielmiessler/Fabric/pull/1727) by [ksylvan](https://github.com/ksylvan): Standardize Anthropic Beta Failure Logging
README.md (103 changed lines)

@@ -1,7 +1,18 @@
<div align="center">
Fabric is graciously supported by…
<a href="https://go.warp.dev/fabric" target="_blank">
<sup>Special thanks to:</sup>
<br>
<img alt="Warp sponsorship" width="400" src="https://raw.githubusercontent.com/warpdotdev/brand-assets/refs/heads/main/Github/Sponsor/Warp-Github-LG-02.png">
<br>
<h>Warp, built for coding with multiple AI agents</b>
<br>
<sup>Available for macOS, Linux and Windows</sup>
</a>
</div>

[](https://warp.dev/fabric)
<br>

<div align="center">

<img src="./docs/images/fabric-logo-gif.gif" alt="fabriclogo" width="400" height="400"/>

@@ -18,6 +29,10 @@ Fabric is graciously supported by…
<h4><code>fabric</code> is an open-source framework for augmenting humans using AI.</h4>
</div>



</div>

[Updates](#updates) •
[What and Why](#what-and-why) •
[Philosophy](#philosophy) •

@@ -57,6 +72,8 @@ Below are the **new features and capabilities** we've added (newest first):

### Recent Major Features

- [v1.4.309](https://github.com/danielmiessler/fabric/releases/tag/v1.4.309) (Sep 9, 2025) — **Comprehensive internationalization support**: Includes English and Spanish locale files.
- [v1.4.303](https://github.com/danielmiessler/fabric/releases/tag/v1.4.303) (Aug 29, 2025) — **New Binary Releases**: Linux ARM and Windows ARM targets. You can run Fabric on the Raspberry PI and on your Windows Surface!
- [v1.4.294](https://github.com/danielmiessler/fabric/releases/tag/v1.4.294) (Aug 20, 2025) — **Venice AI Support**: Added the Venice AI provider. Venice is a Privacy-First, Open-Source AI provider. See their ["About Venice"](https://docs.venice.ai/overview/about-venice) page for details.
- [v1.4.291](https://github.com/danielmiessler/fabric/releases/tag/v1.4.291) (Aug 18, 2025) — **Speech To Text**: Add OpenAI speech-to-text support with `--transcribe-file`, `--transcribe-model`, and `--split-media-file` flags.
- [v1.4.287](https://github.com/danielmiessler/fabric/releases/tag/v1.4.287) (Aug 16, 2025) — **AI Reasoning**: Add Thinking to Gemini models and introduce `readme_updates` python script

@@ -117,16 +134,14 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
- [Breaking problems into components](#breaking-problems-into-components)
- [Too many prompts](#too-many-prompts)
- [Installation](#installation)
- [Get Latest Release Binaries](#get-latest-release-binaries)
- [Windows](#windows)
- [macOS (arm64)](#macos-arm64)
- [macOS (amd64)](#macos-amd64)
- [Linux (amd64)](#linux-amd64)
- [Linux (arm64)](#linux-arm64)
- [One-Line Install (Recommended)](#one-line-install-recommended)
- [Manual Binary Downloads](#manual-binary-downloads)
- [Using package managers](#using-package-managers)
- [macOS (Homebrew)](#macos-homebrew)
- [Arch Linux (AUR)](#arch-linux-aur)
- [Windows](#windows)
- [From Source](#from-source)
- [Docker](#docker)
- [Environment Variables](#environment-variables)
- [Setup](#setup)
- [Per-Pattern Model Mapping](#per-pattern-model-mapping)

@@ -203,40 +218,25 @@ Fabric has Patterns for all sorts of life and work activities, including:

## Installation

To install Fabric, you can use the latest release binaries or install it from the source.
### One-Line Install (Recommended)

### Get Latest Release Binaries
**Unix/Linux/macOS:**

#### Windows

`https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe`

Or via PowerShell, just copy and paste and run the following snippet to install the binary into `{HOME}\.local\bin`. Please make sure that directory is included in your `PATH`.

```powershell
$ErrorActionPreference = "Stop"
$LATEST="https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe"
$DIR="${HOME}\.local\bin"
New-Item -Path $DIR -ItemType Directory -Force
Invoke-WebRequest -URI "${LATEST}" -outfile "${DIR}\fabric.exe"
& "${DIR}\fabric.exe" /version
```bash
curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | bash
```

#### macOS (arm64)
**Windows PowerShell:**

`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-arm64 > fabric && chmod +x fabric && ./fabric --version`
```powershell
iwr -useb https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.ps1 | iex
```

#### macOS (amd64)
> See [scripts/installer/README.md](./scripts/installer/README.md) for custom installation options and troubleshooting.

`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-amd64 > fabric && chmod +x fabric && ./fabric --version`
### Manual Binary Downloads

#### Linux (amd64)

`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-amd64 > fabric && chmod +x fabric && ./fabric --version`

#### Linux (arm64)

`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-arm64 > fabric && chmod +x fabric && ./fabric --version`
The latest release binary archives and their expected SHA256 hashes can be found at <https://github.com/danielmiessler/fabric/releases/latest>

### Using package managers

@@ -255,6 +255,12 @@ alias fabric='fabric-ai'

`yay -S fabric-ai`

#### Windows

Use the official Microsoft supported `Winget` tool:

`winget install danielmiessler.Fabric`

### From Source

To install Fabric, [make sure Go is installed](https://go.dev/doc/install), and then run the following command.

@@ -264,6 +270,35 @@ To install Fabric, [make sure Go is installed](https://go.dev/doc/install), and
go install github.com/danielmiessler/fabric/cmd/fabric@latest
```

### Docker

Run Fabric using pre-built Docker images:

```bash
# Use latest image from Docker Hub
docker run --rm -it kayvan/fabric:latest --version

# Use specific version from GHCR
docker run --rm -it ghcr.io/ksylvan/fabric:v1.4.305 --version

# Run setup (first time)
mkdir -p $HOME/.fabric-config
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:latest --setup

# Use Fabric with your patterns
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:latest -p summarize

# Run the REST API server
docker run --rm -it -p 8080:8080 -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:latest --serve
```

**Images available at:**

- Docker Hub: [kayvan/fabric](https://hub.docker.com/repository/docker/kayvan/fabric/general)
- GHCR: [ksylvan/fabric](https://github.com/ksylvan/fabric/pkgs/container/fabric)

See [scripts/docker/README.md](./scripts/docker/README.md) for building custom images and advanced configuration.

### Environment Variables

You may need to set some environment variables in your `~/.bashrc` on linux or `~/.zshrc` file on mac to be able to run the `fabric` command. Here is an example of what you can add:
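A minimal sketch of the kind of additions this refers to (an illustration only, assuming a default Go layout with binaries under `$HOME/go/bin`; the README's actual example sits outside this hunk):

```bash
# Illustrative ~/.bashrc or ~/.zshrc additions (adjust paths to your setup)
export GOPATH="$HOME/go"          # assumed default Go workspace
export PATH="$GOPATH/bin:$PATH"   # so the go-installed fabric binary is on PATH
```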
@@ -1,3 +1,3 @@
package main

var version = "v1.4.295"
var version = "v1.4.309"
Binary file not shown.
@@ -3,6 +3,9 @@ package internal
import (
"context"
"fmt"
"os/exec"
"regexp"
"strings"

"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/cache"
"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"

@@ -17,17 +20,50 @@ type ReleaseManager struct {
repo string
}

// getGitHubInfo extracts owner and repo from git remote origin URL
func getGitHubInfo() (owner, repo string, err error) {
cmd := exec.Command("git", "remote", "get-url", "origin")
output, err := cmd.Output()
if err != nil {
return "", "", fmt.Errorf("failed to get git remote URL: %w", err)
}

url := strings.TrimSpace(string(output))

// Handle both SSH and HTTPS URLs
// SSH: git@github.com:owner/repo.git
// HTTPS: https://github.com/owner/repo.git
var re *regexp.Regexp
if strings.HasPrefix(url, "git@") {
re = regexp.MustCompile(`git@github\.com:([^/]+)/([^/.]+)(?:\.git)?`)
} else {
re = regexp.MustCompile(`https://github\.com/([^/]+)/([^/.]+)(?:\.git)?`)
}

matches := re.FindStringSubmatch(url)
if len(matches) < 3 {
return "", "", fmt.Errorf("invalid GitHub URL format: %s", url)
}

return matches[1], matches[2], nil
}

func NewReleaseManager(cfg *config.Config) (*ReleaseManager, error) {
cache, err := cache.New(cfg.CacheFile)
if err != nil {
return nil, fmt.Errorf("failed to create cache: %w", err)
}

owner, repo, err := getGitHubInfo()
if err != nil {
return nil, fmt.Errorf("failed to get GitHub repository info: %w", err)
}

return &ReleaseManager{
cache: cache,
githubToken: cfg.GitHubToken,
owner: "danielmiessler",
repo: "fabric",
owner: owner,
repo: repo,
}, nil
}

@@ -65,17 +101,49 @@ func (rm *ReleaseManager) UpdateReleaseDescription(version string) error {
client = github.NewClient(nil)
}

release, _, err := client.Repositories.GetReleaseByTag(ctx, rm.owner, rm.repo, version)
// Check if current repository is a fork by getting repo details
repo, _, err := client.Repositories.Get(ctx, rm.owner, rm.repo)
if err != nil {
return fmt.Errorf("failed to get repository info: %w", err)
}

// If repository is a fork, try updating the upstream (parent) repository first
if repo.Parent != nil {
parentOwner := repo.Parent.Owner.GetLogin()
parentRepo := repo.Parent.GetName()

fmt.Printf("Repository is a fork of %s/%s, attempting to update upstream release...\n", parentOwner, parentRepo)

err := rm.updateReleaseForRepo(ctx, client, parentOwner, parentRepo, version, releaseBody)
if err == nil {
fmt.Printf("Successfully updated release description for %s in upstream repository %s/%s\n", version, parentOwner, parentRepo)
return nil
}

fmt.Printf("Failed to update upstream repository: %v\nFalling back to current repository...\n", err)
}

// Update current repository (either not a fork or upstream update failed)
err = rm.updateReleaseForRepo(ctx, client, rm.owner, rm.repo, version, releaseBody)
if err != nil {
return fmt.Errorf("failed to update release description for version %s in repository %s/%s: %w", version, rm.owner, rm.repo, err)
}

fmt.Printf("Successfully updated release description for %s in repository %s/%s\n", version, rm.owner, rm.repo)
return nil
}

func (rm *ReleaseManager) updateReleaseForRepo(ctx context.Context, client *github.Client, owner, repo, version, releaseBody string) error {
release, _, err := client.Repositories.GetReleaseByTag(ctx, owner, repo, version)
if err != nil {
return fmt.Errorf("failed to get release for version %s: %w", version, err)
}

release.Body = &releaseBody
_, _, err = client.Repositories.EditRelease(ctx, rm.owner, rm.repo, *release.ID, release)
_, _, err = client.Repositories.EditRelease(ctx, owner, repo, *release.ID, release)
if err != nil {
return fmt.Errorf("failed to update release description for version %s: %w", version, err)
}

fmt.Printf("Successfully updated release description for %s\n", version)
return nil
}
data/patterns/create_story_about_person/system.md (new file, 26 lines)

@@ -0,0 +1,26 @@
You are an expert creative writer specializing in character-driven narratives, and a keen observer of human psychology. Your task is to craft a compelling, realistic short story based on a psychological profile or personal data provided by the user.

**Input:**
The user will provide a psychological profile or descriptive data about a fictional or real person. This input will be clearly delimited by triple backticks (```). It may include personality traits, habits, fears, motivations, strengths, weaknesses, background information, or specific behavioral patterns.

**Task Steps:**

1. **Analyze Profile:** Carefully read and internalize the provided psychological profile. Identify the core personality traits, typical reactions, strengths, and vulnerabilities of the individual.
2. **Brainstorm Challenges:** Based on the analysis from Step 1, generate 3-5 common, relatable, everyday problems or minor dilemmas that a person with this specific profile might genuinely encounter. These challenges should be varied and could span social, professional, personal, or emotional domains.
3. **Develop Strategies:** For each identified problem from Step 2, devise 1-2 specific, plausible methods or strategies that the character, consistent with their psychological profile, would naturally employ (or attempt to employ) to navigate, cope with, or solve these challenges. Consider both internal thought processes and external actions.
4. **Construct Narrative:** Weave these problems and the character's responses into a cohesive, engaging short story (approximately 500-700 words, 3-5 paragraphs). The story should have a clear narrative flow, introducing the character, presenting the challenges, and showing their journey through them.
5. **Maintain Consistency:** Throughout the story, ensure the character's actions, dialogue, internal monologue, and emotional reactions are consistently aligned with the psychological profile provided. The story should feel authentic to the character.

**Output Requirements:**

* **Format:** A continuous narrative short story.
* **Tone:** Empathetic, realistic, and engaging.
* **Content:** The story must clearly depict the character facing everyday problems and demonstrate their unique methods and strategies for navigating these challenges, directly reflecting the input profile.
* **Length:** Approximately 500-700 words.
* **Avoid:** Overly dramatic or fantastical scenarios unless the profile explicitly suggests such a context. Focus on the 'everyday common problems'.

**Example of Input Format:**

```
[Psychological Profile/Data Here]
```
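As a usage sketch (an assumed invocation based on the `-p` pattern flag shown in the README's Docker examples, not something this diff specifies), the new pattern could be fed a saved profile via stdin:

```bash
# Illustrative only: run the new pattern against a profile file
fabric -p create_story_about_person < profile.txt
```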
data/patterns/heal_person/system.md (new file, 53 lines)

@@ -0,0 +1,53 @@
# IDENTITY and PURPOSE

You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life.

Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.

# STEPS

- Carefully review the psychological-profile and/or psychology data file provided as input.

- Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being.

- Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement.

- Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile.

# OUTPUT INSTRUCTIONS

- Only output Markdown.

- Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate.

- Ensure you follow ALL these instructions when creating your output.

# INPUT

INPUT:# IDENTITY and PURPOSE

You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life.

Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.

# STEPS

- Carefully review the psychological-profile and/or psychology data file provided as input.

- Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being.

- Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement.

- Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile.

# OUTPUT INSTRUCTIONS

- Only output Markdown.

- Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate.

- Ensure you follow ALL these instructions when creating your output.

# INPUT

INPUT:
@@ -88,96 +88,96 @@
84. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
85. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
86. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
87. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
88. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
89. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
90. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
91. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
92. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
93. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
94. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
95. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
96. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
97. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
98. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
99. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
100. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
101. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
102. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
103. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
104. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
105. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
106. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
107. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
108. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
109. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
110. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
111. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
112. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
113. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
114. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
115. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
116. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
117. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the INSIGHTS section, also IDEAS section.
118. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
119. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
120. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
121. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
122. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
123. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
124. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
125. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
126. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
127. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
128. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
129. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
130. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
131. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
132. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
133. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
134. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
135. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
136. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
137. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
138. **extract_videoid**: Extracts and outputs the video ID from any given URL.
139. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
140. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
141. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
142. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
143. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
144. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
145. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
146. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
147. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
148. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
149. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
150. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
151. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
152. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
153. **identify_job_stories**: Identifies key job stories or requirements for roles.
154. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
155. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
156. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
157. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning. skills.
158. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
159. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
160. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
161. **official_pattern_template**: Template to use if you want to create new fabric patterns.
162. **prepare_7s_strategy**: Prepares a comprehensive briefing document from 7S's strategy capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
163. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
164. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
165. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
166. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
167. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
168. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
169. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
170. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
171. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
|
||||
172. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
|
||||
173. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
|
||||
174. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
|
||||
175. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
|
||||
176. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
|
||||
87. **create_story_about_person**: Creates compelling, realistic short stories based on psychological profiles, showing how characters navigate everyday problems using strategies consistent with their personality traits.
|
||||
88. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
|
||||
89. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
|
||||
90. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
|
||||
91. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
|
||||
92. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
|
||||
93. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
|
||||
94. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
|
||||
95. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
|
||||
96. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
|
||||
97. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
|
||||
98. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
|
||||
99. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
|
||||
100. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
|
||||
101. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
|
||||
102. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
|
||||
103. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
|
||||
104. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
|
||||
105. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
|
||||
106. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
|
||||
107. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
|
||||
108. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
|
||||
109. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
|
||||
110. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
|
||||
111. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
|
||||
112. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
|
||||
113. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
|
||||
114. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
|
||||
115. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
|
||||
116. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
|
||||
117. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
|
||||
118. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the INSIGHTS section, also IDEAS section.
|
||||
119. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
|
||||
120. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
|
||||
121. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
|
||||
122. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
|
||||
123. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
|
||||
124. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
|
||||
125. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
|
||||
126. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
|
||||
127. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
|
||||
128. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
|
||||
129. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
|
||||
130. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
|
||||
131. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
|
||||
132. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
|
||||
133. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
|
||||
134. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
|
||||
135. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
|
||||
136. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
|
||||
137. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
|
||||
138. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
|
||||
139. **extract_videoid**: Extracts and outputs the video ID from any given URL.
|
||||
140. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
|
||||
141. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
|
||||
142. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
|
||||
143. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
|
||||
144. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
|
||||
145. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
|
||||
146. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
|
||||
147. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
|
||||
148. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
|
||||
149. **heal_person**: Develops a comprehensive plan for spiritual and mental healing based on psychological profiles, providing personalized recommendations for mental health improvement and overall life enhancement.
|
||||
150. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
|
||||
151. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
|
||||
152. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
|
||||
153. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
|
||||
154. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
|
||||
155. **identify_job_stories**: Identifies key job stories or requirements for roles.
|
||||
156. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
|
||||
157. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
|
||||
158. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
|
||||
159. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning. skills.
|
||||
160. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
|
||||
161. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
162. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
|
||||
163. **official_pattern_template**: Template to use if you want to create new fabric patterns.
|
||||
164. **prepare_7s_strategy**: Prepares a comprehensive briefing document from 7S's strategy capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
|
||||
165. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
|
||||
166. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
|
||||
167. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
|
||||
168. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
169. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
|
||||
170. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
|
||||
171. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
|
||||
172. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
|
||||
173. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
|
||||
174. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
|
||||
175. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
|
||||
176. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
|
||||
177. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
|
||||
178. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
|
||||
179. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
|
||||
|
||||
@@ -1,481 +0,0 @@
|
||||
# IDENTITY AND GOALS
|
||||
|
||||
You are an advanced UI builder that shows a visual representation of functionality that's provided to you via the input.
|
||||
|
||||
# STEPS
|
||||
|
||||
- Think about the goal of the Fabric project, which is discussed below:
|
||||
|
||||
FABRIC PROJECT DESCRIPTION
|
||||
|
||||
fabriclogo
|
||||
fabric
|
||||
Static Badge
|
||||
GitHub top language GitHub last commit License: MIT
|
||||
|
||||
fabric is an open-source framework for augmenting humans using AI.
|
||||
|
||||
Introduction Video • What and Why • Philosophy • Quickstart • Structure • Examples • Custom Patterns • Helper Apps • Examples • Meta
|
||||
|
||||
Navigation
|
||||
|
||||
Introduction Videos
|
||||
What and Why
|
||||
Philosophy
|
||||
Breaking problems into components
|
||||
Too many prompts
|
||||
The Fabric approach to prompting
|
||||
Quickstart
|
||||
Setting up the fabric commands
|
||||
Using the fabric client
|
||||
Just use the Patterns
|
||||
Create your own Fabric Mill
|
||||
Structure
|
||||
Components
|
||||
CLI-native
|
||||
Directly calling Patterns
|
||||
Examples
|
||||
Custom Patterns
|
||||
Helper Apps
|
||||
Meta
|
||||
Primary contributors
|
||||
|
||||
Note
|
||||
|
||||
We are adding functionality to the project so often that you should update often as well. That means: git pull; pipx install . --force; fabric --update; source ~/.zshrc (or ~/.bashrc) in the main directory!
|
||||
March 13, 2024 — We just added pipx install support, which makes it way easier to install Fabric, support for Claude, local models via Ollama, and a number of new Patterns. Be sure to update and check fabric -h for the latest!
|
||||
|
||||
Introduction videos
|
||||
|
||||
Note
|
||||
|
||||
These videos use the ./setup.sh install method, which is now replaced with the easier pipx install . method. Other than that everything else is still the same.
|
||||
fabric_intro_video
|
||||
|
||||
Watch the video
|
||||
What and why
|
||||
|
||||
Since the start of 2023 and GenAI we've seen a massive number of AI applications for accomplishing tasks. It's powerful, but it's not easy to integrate this functionality into our lives.
|
||||
|
||||
In other words, AI doesn't have a capabilities problem—it has an integration problem.
|
||||
|
||||
Fabric was created to address this by enabling everyone to granularly apply AI to everyday challenges.
|
||||
|
||||
Philosophy
|
||||
|
||||
AI isn't a thing; it's a magnifier of a thing. And that thing is human creativity.
|
||||
We believe the purpose of technology is to help humans flourish, so when we talk about AI we start with the human problems we want to solve.
|
||||
|
||||
Breaking problems into components
|
||||
|
||||
Our approach is to break problems into individual pieces (see below) and then apply AI to them one at a time. See below for some examples.
|
||||
|
||||
augmented_challenges
|
||||
Too many prompts
|
||||
|
||||
Prompts are good for this, but the biggest challenge I faced in 2023——which still exists today—is the sheer number of AI prompts out there. We all have prompts that are useful, but it's hard to discover new ones, know if they are good or not, and manage different versions of the ones we like.
|
||||
|
||||
One of fabric's primary features is helping people collect and integrate prompts, which we call Patterns, into various parts of their lives.
|
||||
|
||||
Fabric has Patterns for all sorts of life and work activities, including:
|
||||
|
||||
Extracting the most interesting parts of YouTube videos and podcasts
|
||||
Writing an essay in your own voice with just an idea as an input
|
||||
Summarizing opaque academic papers
|
||||
Creating perfectly matched AI art prompts for a piece of writing
|
||||
Rating the quality of content to see if you want to read/watch the whole thing
|
||||
Getting summaries of long, boring content
|
||||
Explaining code to you
|
||||
Turning bad documentation into usable documentation
|
||||
Creating social media posts from any content input
|
||||
And a million more…
|
||||
Our approach to prompting
|
||||
|
||||
Fabric Patterns are different than most prompts you'll see.
|
||||
|
||||
First, we use Markdown to help ensure maximum readability and editability. This not only helps the creator make a good one, but also anyone who wants to deeply understand what it does. Importantly, this also includes the AI you're sending it to!
|
||||
Here's an example of a Fabric Pattern.
|
||||
|
||||
https://github.com/danielmiessler/fabric/blob/main/patterns/extract_wisdom/system.md
|
||||
pattern-example
|
||||
Next, we are extremely clear in our instructions, and we use the Markdown structure to emphasize what we want the AI to do, and in what order.
|
||||
|
||||
And finally, we tend to use the System section of the prompt almost exclusively. In over a year of being heads-down with this stuff, we've just seen more efficacy from doing that. If that changes, or we're shown data that says otherwise, we will adjust.
|
||||
|
||||
Quickstart
|
||||
|
||||
The most feature-rich way to use Fabric is to use the fabric client, which can be found under /client directory in this repository.
|
||||
|
||||
Setting up the fabric commands
|
||||
|
||||
Follow these steps to get all fabric related apps installed and configured.
|
||||
|
||||
Navigate to where you want the Fabric project to live on your system in a semi-permanent place on your computer.
|
||||
# Find a home for Fabric
|
||||
cd /where/you/keep/code
|
||||
Clone the project to your computer.
|
||||
# Clone Fabric to your computer
|
||||
git clone https://github.com/danielmiessler/fabric.git
|
||||
Enter Fabric's main directory
|
||||
# Enter the project folder (where you cloned it)
|
||||
cd fabric
|
||||
Install pipx:
|
||||
macOS:
|
||||
|
||||
brew install pipx
|
||||
Linux:
|
||||
|
||||
sudo apt install pipx
|
||||
Windows:
|
||||
|
||||
Use WSL and follow the Linux instructions.
|
||||
|
||||
Install fabric
|
||||
pipx install .
|
||||
Run setup:
|
||||
fabric --setup
|
||||
Restart your shell to reload everything.
|
||||
|
||||
Now you are up and running! You can test by running the help.
|
||||
|
||||
# Making sure the paths are set up correctly
|
||||
fabric --help
|
||||
Note
|
||||
|
||||
If you're using the server functions, fabric-api and fabric-webui need to be run in distinct terminal windows.
|
||||
Using the fabric client
|
||||
|
||||
Once you have it all set up, here's how to use it.
|
||||
|
||||
Check out the options fabric -h
|
||||
us the results in
|
||||
realtime. NOTE: You will not be able to pipe the
|
||||
output into another command.
|
||||
--list, -l List available patterns
|
||||
--clear Clears your persistent model choice so that you can
|
||||
once again use the --model flag
|
||||
--update, -u Update patterns. NOTE: This will revert the default
|
||||
model to gpt4-turbo. please run --changeDefaultModel
|
||||
to once again set default model
|
||||
--pattern PATTERN, -p PATTERN
|
||||
The pattern (prompt) to use
|
||||
--setup Set up your fabric instance
|
||||
--changeDefaultModel CHANGEDEFAULTMODEL
|
||||
Change the default model. For a list of available
|
||||
models, use the --listmodels flag.
|
||||
--model MODEL, -m MODEL
|
||||
Select the model to use. NOTE: Will not work if you
|
||||
have set a default model. please use --clear to clear
|
||||
persistence before using this flag
|
||||
--vendor VENDOR, -V VENDOR
|
||||
Specify vendor for the selected model (e.g., -V "LM Studio" -m openai/gpt-oss-20b)
|
||||
--listmodels List all available models
|
||||
--remoteOllamaServer REMOTEOLLAMASERVER
|
||||
The URL of the remote ollamaserver to use. ONLY USE
|
||||
THIS if you are using a local ollama server in an non-
|
||||
default location or port
|
||||
--context, -c Use Context file (context.md) to add context to your
|
||||
pattern
|
||||
age: fabric [-h] [--text TEXT] [--copy] [--agents {trip_planner,ApiKeys}]
|
||||
[--output [OUTPUT]] [--stream] [--list] [--clear] [--update]
|
||||
[--pattern PATTERN] [--setup]
|
||||
[--changeDefaultModel CHANGEDEFAULTMODEL] [--model MODEL]
|
||||
[--listmodels] [--remoteOllamaServer REMOTEOLLAMASERVER]
|
||||
[--context]
|
||||
|
||||
An open source framework for augmenting humans using AI.
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
--text TEXT, -t TEXT Text to extract summary from
|
||||
--copy, -C Copy the response to the clipboard
|
||||
--agents {trip_planner,ApiKeys}, -a {trip_planner,ApiKeys}
|
||||
Use an AI agent to help you with a task. Acceptable
|
||||
values are 'trip_planner' or 'ApiKeys'. This option
|
||||
cannot be used with any other flag.
|
||||
--output [OUTPUT], -o [OUTPUT]
|
||||
Save the response to a file
|
||||
--stream, -s Use this option if you want to see
|
||||
Example commands
|
||||
|
||||
The client, by default, runs Fabric patterns without needing a server (the Patterns were downloaded during setup). This means the client connects directly to OpenAI using the input given and the Fabric pattern used.
|
||||
|
||||
Run the summarize Pattern based on input from stdin. In this case, the body of an article.
|
||||
pbpaste | fabric --pattern summarize
|
||||
Run the analyze_claims Pattern with the --stream option to get immediate and streaming results.
|
||||
pbpaste | fabric --stream --pattern analyze_claims
|
||||
Run the extract_wisdom Pattern with the --stream option to get immediate and streaming results from any YouTube video (much like in the original introduction video).
|
||||
yt --transcript https://youtube.com/watch?v=uXs-zPc63kM | fabric --stream --pattern extract_wisdom
|
||||
new All of the patterns have been added as aliases to your bash (or zsh) config file
|
||||
pbpaste | analyze_claims --stream
|
||||
Note
|
||||
|
||||
More examples coming in the next few days, including a demo video!
|
||||
Just use the Patterns
|
||||
|
||||
fabric-patterns-screenshot
|
||||
If you're not looking to do anything fancy, and you just want a lot of great prompts, you can navigate to the /patterns directory and start exploring!
|
||||
|
||||
We hope that if you used nothing else from Fabric, the Patterns by themselves will make the project useful.
|
||||
|
||||
You can use any of the Patterns you see there in any AI application that you have, whether that's ChatGPT or some other app or website. Our plan and prediction is that people will soon be sharing many more than those we've published, and they will be way better than ours.
|
||||
|
||||
The wisdom of crowds for the win.
|
||||
|
||||
Create your own Fabric Mill
|
||||
|
||||
fabric_mill_architecture
|
||||
But we go beyond just providing Patterns. We provide code for you to build your very own Fabric server and personal AI infrastructure!
|
||||
|
||||
Structure
|
||||
|
||||
Fabric is themed off of, well… fabric—as in…woven materials. So, think blankets, quilts, patterns, etc. Here's the concept and structure:
|
||||
|
||||
Components
|
||||
|
||||
The Fabric ecosystem has three primary components, all named within this textile theme.
|
||||
|
||||
The Mill is the (optional) server that makes Patterns available.
|
||||
Patterns are the actual granular AI use cases (prompts).
|
||||
Stitches are chained together Patterns that create advanced functionality (see below).
|
||||
Looms are the client-side apps that call a specific Pattern hosted by a Mill.
|
||||
CLI-native
|
||||
|
||||
One of the coolest parts of the project is that it's command-line native!
|
||||
|
||||
Each Pattern you see in the /patterns directory can be used in any AI application you use, but you can also set up your own server using the /server code and then call APIs directly!
|
||||
|
||||
Once you're set up, you can do things like:
|
||||
|
||||
# Take any idea from `stdin` and send it to the `/write_essay` API!
|
||||
echo "An idea that coding is like speaking with rules." | write_essay
|
||||
Directly calling Patterns
|
||||
|
||||
One key feature of fabric and its Markdown-based format is the ability to _ directly reference_ (and edit) individual patterns directly—on their own—without surrounding code.
|
||||
|
||||
As an example, here's how to call the direct location of the extract_wisdom pattern.
|
||||
|
||||
https://github.com/danielmiessler/fabric/blob/main/patterns/extract_wisdom/system.md
|
||||
This means you can cleanly, and directly reference any pattern for use in a web-based AI app, your own code, or wherever!
|
||||
|
||||
Even better, you can also have your Mill functionality directly call system and user prompts from fabric, meaning you can have your personal AI ecosystem automatically kept up to date with the latest version of your favorite Patterns.
|
||||
|
||||
Here's what that looks like in code:
|
||||
|
||||
https://github.com/danielmiessler/fabric/blob/main/server/fabric_api_server.py
|
||||
# /extwis
|
||||
@app.route("/extwis", methods=["POST"])
|
||||
@auth_required # Require authentication
|
||||
def extwis():
|
||||
data = request.get_json()
|
||||
|
||||
# Warn if there's no input
|
||||
if "input" not in data:
|
||||
return jsonify({"error": "Missing input parameter"}), 400
|
||||
|
||||
# Get data from client
|
||||
input_data = data["input"]
|
||||
|
||||
# Set the system and user URLs
|
||||
system_url = "https://raw.githubusercontent.com/danielmiessler/fabric/main/patterns/extract_wisdom/system.md"
|
||||
user_url = "https://raw.githubusercontent.com/danielmiessler/fabric/main/patterns/extract_wisdom/user.md"
|
||||
|
||||
# Fetch the prompt content
|
||||
system_content = fetch_content_from_url(system_url)
|
||||
user_file_content = fetch_content_from_url(user_url)
|
||||
|
||||
# Build the API call
|
||||
system_message = {"role": "system", "content": system_content}
|
||||
user_message = {"role": "user", "content": user_file_content + "\n" + input_data}
|
||||
messages = [system_message, user_message]
|
||||
try:
|
||||
response = openai.chat.completions.create(
|
||||
model="gpt-4-1106-preview",
|
||||
messages=messages,
|
||||
temperature=0.0,
|
||||
top_p=1,
|
||||
frequency_penalty=0.1,
|
||||
presence_penalty=0.1,
|
||||
)
|
||||
assistant_message = response.choices[0].message.content
|
||||
return jsonify({"response": assistant_message})
|
||||
except Exception as e:
|
||||
return jsonify({"error": str(e)}), 500
|
||||
Examples
|
||||
|
||||
Here's an abridged output example from the extract_wisdom pattern (limited to only 10 items per section).
|
||||
|
||||
# Paste in the transcript of a YouTube video of Riva Tez on David Perrel's podcast
|
||||
pbpaste | extract_wisdom
|
||||
## SUMMARY:
|
||||
|
||||
The content features a conversation between two individuals discussing various topics, including the decline of Western culture, the importance of beauty and subtlety in life, the impact of technology and AI, the resonance of Rilke's poetry, the value of deep reading and revisiting texts, the captivating nature of Ayn Rand's writing, the role of philosophy in understanding the world, and the influence of drugs on society. They also touch upon creativity, attention spans, and the importance of introspection.
|
||||
|
||||
## IDEAS:
|
||||
|
||||
1. Western culture is perceived to be declining due to a loss of values and an embrace of mediocrity.
|
||||
2. Mass media and technology have contributed to shorter attention spans and a need for constant stimulation.
|
||||
3. Rilke's poetry resonates due to its focus on beauty and ecstasy in everyday objects.
|
||||
4. Subtlety is often overlooked in modern society due to sensory overload.
|
||||
5. The role of technology in shaping music and performance art is significant.
|
||||
6. Reading habits have shifted from deep, repetitive reading to consuming large quantities of new material.
|
||||
7. Revisiting influential books as one ages can lead to new insights based on accumulated wisdom and experiences.
|
||||
8. Fiction can vividly illustrate philosophical concepts through characters and narratives.
|
||||
9. Many influential thinkers have backgrounds in philosophy, highlighting its importance in shaping reasoning skills.
|
||||
10. Philosophy is seen as a bridge between theology and science, asking questions that both fields seek to answer.
|
||||
|
||||
## QUOTES:
|
||||
|
||||
1. "You can't necessarily think yourself into the answers. You have to create space for the answers to come to you."
|
||||
2. "The West is dying and we are killing her."
|
||||
3. "The American Dream has been replaced by mass packaged mediocrity porn, encouraging us to revel like happy pigs in our own meekness."
|
||||
4. "There's just not that many people who have the courage to reach beyond consensus and go explore new ideas."
|
||||
5. "I'll start watching Netflix when I've read the whole of human history."
|
||||
6. "Rilke saw beauty in everything... He sees it's in one little thing, a representation of all things that are beautiful."
|
||||
7. "Vanilla is a very subtle flavor... it speaks to sort of the sensory overload of the modern age."
|
||||
8. "When you memorize chapters [of the Bible], it takes a few months, but you really understand how things are structured."
|
||||
9. "As you get older, if there's books that moved you when you were younger, it's worth going back and rereading them."
|
||||
10. "She [Ayn Rand] took complicated philosophy and embodied it in a way that anybody could resonate with."
|
||||
|
||||
## HABITS:
|
||||
|
||||
1. Avoiding mainstream media consumption for deeper engagement with historical texts and personal research.
|
||||
2. Regularly revisiting influential books from youth to gain new insights with age.
|
||||
3. Engaging in deep reading practices rather than skimming or speed-reading material.
|
||||
4. Memorizing entire chapters or passages from significant texts for better understanding.
|
||||
5. Disengaging from social media and fast-paced news cycles for more focused thought processes.
|
||||
6. Walking long distances as a form of meditation and reflection.
|
||||
7. Creating space for thoughts to solidify through introspection and stillness.
|
||||
8. Embracing emotions such as grief or anger fully rather than suppressing them.
|
||||
9. Seeking out varied experiences across different careers and lifestyles.
|
||||
10. Prioritizing curiosity-driven research without specific goals or constraints.
|
||||
|
||||
## FACTS:
|
||||
|
||||
1. The West is perceived as declining due to cultural shifts away from traditional values.
|
||||
2. Attention spans have shortened due to technological advancements and media consumption habits.
|
||||
3. Rilke's poetry emphasizes finding beauty in everyday objects through detailed observation.
|
||||
4. Modern society often overlooks subtlety due to sensory overload from various stimuli.
|
||||
5. Reading habits have evolved from deep engagement with texts to consuming large quantities quickly.
|
||||
6. Revisiting influential books can lead to new insights based on accumulated life experiences.
|
||||
7. Fiction can effectively illustrate philosophical concepts through character development and narrative arcs.
|
||||
8. Philosophy plays a significant role in shaping reasoning skills and understanding complex ideas.
|
||||
9. Creativity may be stifled by cultural nihilism and protectionist attitudes within society.
|
||||
10. Short-term thinking undermines efforts to create lasting works of beauty or significance.
|
||||
|
||||
## REFERENCES:
|
||||
|
||||
1. Rainer Maria Rilke's poetry
|
||||
2. Netflix
|
||||
3. Underworld concert
|
||||
4. Katy Perry's theatrical performances
|
||||
5. Taylor Swift's performances
|
||||
6. Bible study
|
||||
7. Atlas Shrugged by Ayn Rand
|
||||
8. Robert Pirsig's writings
|
||||
9. Bertrand Russell's definition of philosophy
|
||||
10. Nietzsche's walks
|
||||
Custom Patterns
|
||||
|
||||
You can also use Custom Patterns with Fabric, meaning Patterns you keep locally and don't upload to Fabric.
|
||||
|
||||
One possible place to store them is ~/.config/custom-fabric-patterns.
|
||||
|
||||
Then when you want to use them, simply copy them into ~/.config/fabric/patterns.
|
||||
|
||||
cp -a ~/.config/custom-fabric-patterns/* ~/.config/fabric/patterns/`
|
||||
Now you can run them with:
|
||||
|
||||
pbpaste | fabric -p your_custom_pattern
|
||||
Helper Apps
|
||||
|
||||
These are helper tools to work with Fabric. Examples include things like getting transcripts from media files, getting metadata about media, etc.
|
||||
|
||||
yt (YouTube)
|
||||
|
||||
yt is a command that uses the YouTube API to pull transcripts, pull user comments, get video duration, and other functions. It's primary function is to get a transcript from a video that can then be stitched (piped) into other Fabric Patterns.
|
||||
|
||||
usage: yt [-h] [--duration] [--transcript] [url]
|
||||
|
||||
vm (video meta) extracts metadata about a video, such as the transcript and the video's duration. By Daniel Miessler.
|
||||
|
||||
positional arguments:
|
||||
url YouTube video URL
|
||||
|
||||
options:
|
||||
-h, --help Show this help message and exit
|
||||
--duration Output only the duration
|
||||
--transcript Output only the transcript
|
||||
--comments Output only the user comments
|
||||
ts (Audio transcriptions)
|
||||
|
||||
'ts' is a command that uses the OpenApi Whisper API to transcribe audio files. Due to the context window, this tool uses pydub to split the files into 10 minute segments. for more information on pydub, please refer https://github.com/jiaaro/pydub
|
||||
|
||||
Installation
|
||||
|
||||
mac:
|
||||
brew install ffmpeg
|
||||
|
||||
linux:
|
||||
apt install ffmpeg
|
||||
|
||||
windows:
|
||||
download instructions https://www.ffmpeg.org/download.html
|
||||
ts -h
|
||||
usage: ts [-h] audio_file
|
||||
|
||||
Transcribe an audio file.
|
||||
|
||||
positional arguments:
|
||||
audio_file The path to the audio file to be transcribed.
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
Save
|
||||
|
||||
save is a "tee-like" utility to pipeline saving of content, while keeping the output stream intact. Can optionally generate "frontmatter" for PKM utilities like Obsidian via the "FABRIC_FRONTMATTER" environment variable
|
||||
|
||||
If you'd like to default variables, set them in ~/.config/fabric/.env. FABRIC_OUTPUT_PATH needs to be set so save where to write. FABRIC_FRONTMATTER_TAGS is optional, but useful for tracking how tags have entered your PKM, if that's important to you.
|
||||
|
||||
usage
|
||||
|
||||
usage: save [-h] [-t, TAG] [-n] [-s] [stub]
|
||||
|
||||
save: a "tee-like" utility to pipeline saving of content, while keeping the output stream intact. Can optionally generate "frontmatter" for PKM utilities like Obsidian via the
|
||||
"FABRIC_FRONTMATTER" environment variable
|
||||
|
||||
positional arguments:
|
||||
stub stub to describe your content. Use quotes if you have spaces. Resulting format is YYYY-MM-DD-stub.md by default
|
||||
|
||||
options:
|
||||
-h, --help show this help message and exit
|
||||
-t, TAG, --tag TAG add an additional frontmatter tag. Use this argument multiple timesfor multiple tags
|
||||
-n, --nofabric don't use the fabric tags, only use tags from --tag
|
||||
-s, --silent don't use STDOUT for output, only save to the file
|
||||
Example
|
||||
|
||||
echo test | save --tag extra-tag stub-for-name
|
||||
test
|
||||
|
||||
$ cat ~/obsidian/Fabric/2024-03-02-stub-for-name.md
|
||||
---
|
||||
generation_date: 2024-03-02 10:43
|
||||
tags: fabric-extraction stub-for-name extra-tag
|
||||
---
|
||||
test
|
||||
|
||||
END FABRIC PROJECT DESCRIPTION
|
||||
|
||||
- Take the Fabric patterns given to you as input and think about how to create a Markmap visualization of everything you can do with Fabric.
|
||||
|
||||
Examples: Analyzing videos, summarizing articles, writing essays, etc.
|
||||
|
||||
- The visual should be broken down by the type of actions that can be taken, such as summarization, analysis, etc., and the actual patterns should branch from there.
|
||||
|
||||
# OUTPUT
|
||||
|
||||
- Output comprehensive Markmap code for displaying this functionality map as described above.
|
||||
|
||||
- NOTE: This is Markmap, NOT Markdown.
|
||||
|
||||
- Output the Markmap code and nothing else.
|
||||
@@ -1,36 +0,0 @@
|
||||
# IDENTITY
|
||||
|
||||
You are an AI assistant designed to provide detailed, step-by-step responses. Your outputs should follow this structure:
|
||||
|
||||
# STEPS
|
||||
|
||||
1. Begin with a <thinking> section.
|
||||
|
||||
2. Inside the thinking section:
|
||||
|
||||
- a. Briefly analyze the question and outline your approach.
|
||||
|
||||
- b. Present a clear plan of steps to solve the problem.
|
||||
|
||||
- c. Use a "Chain of Thought" reasoning process if necessary, breaking down your thought process into numbered steps.
|
||||
|
||||
3. Include a <reflection> section for each idea where you:
|
||||
|
||||
- a. Review your reasoning.
|
||||
|
||||
- b. Check for potential errors or oversights.
|
||||
|
||||
- c. Confirm or adjust your conclusion if necessary.
|
||||
- Be sure to close all reflection sections.
|
||||
- Close the thinking section with </thinking>.
|
||||
- Provide your final answer in an <output> section.
|
||||
|
||||
Always use these tags in your responses. Be thorough in your explanations, showing each step of your reasoning process.
|
||||
Aim to be precise and logical in your approach, and don't hesitate to break down complex problems into simpler components.
|
||||
Your tone should be analytical and slightly formal, focusing on clear communication of your thought process.
|
||||
Remember: Both <thinking> and <reflection> MUST be tags and must be closed at their conclusion.
|
||||
Make sure all <tags> are on separate lines with no other text.
|
||||
|
||||
# INPUT
|
||||
|
||||
INPUT:
|
||||
@@ -71,7 +71,7 @@ Match the request to one or more of these primary categories:
|
||||
|
||||
## Common Request Types and Best Patterns
|
||||
|
||||
**AI**: ai, create_art_prompt, create_pattern, extract_mcp_servers, extract_wisdom_agents, generate_code_rules, improve_prompt, judge_output, rate_ai_response, rate_ai_result, raw_query, solve_with_cot, suggest_pattern, summarize_prompt
|
||||
**AI**: ai, create_ai_jobs_analysis, create_art_prompt, create_pattern, create_prediction_block, extract_mcp_servers, extract_wisdom_agents, generate_code_rules, improve_prompt, judge_output, rate_ai_response, rate_ai_result, raw_query, suggest_pattern, summarize_prompt
|
||||
|
||||
**ANALYSIS**: ai, analyze_answers, analyze_bill, analyze_bill_short, analyze_candidates, analyze_cfp_submission, analyze_claims, analyze_comments, analyze_debate, analyze_email_headers, analyze_incident, analyze_interviewer_techniques, analyze_logs, analyze_malware, analyze_military_strategy, analyze_mistakes, analyze_paper, analyze_paper_simple, analyze_patent, analyze_personality, analyze_presentation, analyze_product_feedback, analyze_proposition, analyze_prose, analyze_prose_json, analyze_prose_pinker, analyze_risk, analyze_sales_call, analyze_spiritual_text, analyze_tech_impact, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, apply_ul_tags, check_agreement, compare_and_contrast, create_ai_jobs_analysis, create_idea_compass, create_investigation_visualization, create_prediction_block, create_recursive_outline, create_tags, dialog_with_socrates, extract_main_idea, extract_predictions, find_hidden_message, find_logical_fallacies, get_wow_per_minute, identify_dsrp_distinctions, identify_dsrp_perspectives, identify_dsrp_relationships, identify_dsrp_systems, identify_job_stories, label_and_rate, prepare_7s_strategy, provide_guidance, rate_content, rate_value, recommend_artists, recommend_talkpanel_topics, review_design, summarize_board_meeting, t_analyze_challenge_handling, t_check_dunning_kruger, t_check_metrics, t_describe_life_outlook, t_extract_intro_sentences, t_extract_panel_topics, t_find_blindspots, t_find_negative_thinking, t_red_team_thinking, t_threat_model_plans, t_year_in_review, write_hackerone_report
|
||||
|
||||
@@ -83,11 +83,11 @@ Match the request to one or more of these primary categories:
|
||||
|
||||
**CONVERSION**: clean_text, convert_to_markdown, create_graph_from_input, export_data_as_csv, extract_videoid, get_youtube_rss, humanize, md_callout, sanitize_broken_html_to_markdown, to_flashcards, transcribe_minutes, translate, tweet, write_latex
|
||||
|
||||
**CR THINKING**: capture_thinkers_work, create_idea_compass, create_markmap_visualization, dialog_with_socrates, extract_alpha, extract_controversial_ideas, extract_extraordinary_claims, extract_predictions, extract_primary_problem, extract_wisdom_nometa, find_hidden_message, find_logical_fallacies, solve_with_cot, summarize_debate, t_analyze_challenge_handling, t_check_dunning_kruger, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking
|
||||
**CR THINKING**: capture_thinkers_work, create_idea_compass, create_markmap_visualization, dialog_with_socrates, extract_alpha, extract_controversial_ideas, extract_extraordinary_claims, extract_predictions, extract_primary_problem, extract_wisdom_nometa, find_hidden_message, find_logical_fallacies, summarize_debate, t_analyze_challenge_handling, t_check_dunning_kruger, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking
|
||||
|
||||
**CREATIVITY**: create_mnemonic_phrases, write_essay
|
||||
|
||||
**DEVELOPMENT**: agility_story, analyze_prose_json, answer_interview_question, ask_secure_by_design_questions, ask_uncle_duke, coding_master, create_coding_feature, create_coding_project, create_command, create_design_document, create_git_diff_commit, create_mermaid_visualization, create_mermaid_visualization_for_github, create_pattern, create_sigma_rules, create_user_story, explain_code, explain_docs, export_data_as_csv, extract_algorithm_update_recommendations, extract_mcp_servers, extract_poc, generate_code_rules, get_youtube_rss, improve_prompt, official_pattern_template, recommend_pipeline_upgrades, refine_design_document, review_code, review_design, sanitize_broken_html_to_markdown, show_fabric_options_markmap, suggest_pattern, summarize_git_changes, summarize_git_diff, summarize_pull-requests, write_nuclei_template_rule, write_pull-request, write_semgrep_rule
|
||||
**DEVELOPMENT**: agility_story, analyze_logs, analyze_prose_json, answer_interview_question, ask_secure_by_design_questions, ask_uncle_duke, coding_master, create_coding_feature, create_coding_project, create_command, create_design_document, create_git_diff_commit, create_loe_document, create_mermaid_visualization, create_mermaid_visualization_for_github, create_pattern, create_prd, create_sigma_rules, create_user_story, explain_code, explain_docs, explain_project, export_data_as_csv, extract_algorithm_update_recommendations, extract_mcp_servers, extract_poc, extract_product_features, generate_code_rules, get_youtube_rss, identify_job_stories, improve_prompt, official_pattern_template, recommend_pipeline_upgrades, refine_design_document, review_code, review_design, sanitize_broken_html_to_markdown, suggest_pattern, summarize_git_changes, summarize_git_diff, summarize_pull-requests, write_nuclei_template_rule, write_pull-request, write_semgrep_rule
|
||||
|
||||
**DEVOPS**: analyze_terraform_plan
|
||||
|
||||
@@ -95,7 +95,7 @@ Match the request to one or more of these primary categories:
|
||||
|
||||
**GAMING**: create_npc, create_rpg_summary, summarize_rpg_session
|
||||
|
||||
**LEARNING**: analyze_answers, ask_uncle_duke, coding_master, create_diy, create_flash_cards, create_quiz, create_reading_plan, create_story_explanation, dialog_with_socrates, explain_code, explain_docs, explain_math, explain_project, explain_terms, extract_references, improve_academic_writing, provide_guidance, solve_with_cot, summarize_lecture, summarize_paper, to_flashcards, write_essay_pg
|
||||
**LEARNING**: analyze_answers, ask_uncle_duke, coding_master, create_diy, create_flash_cards, create_quiz, create_reading_plan, create_story_explanation, dialog_with_socrates, explain_code, explain_docs, explain_math, explain_project, explain_terms, extract_references, improve_academic_writing, provide_guidance, summarize_lecture, summarize_paper, to_flashcards, write_essay_pg
|
||||
|
||||
**OTHER**: extract_jokes
|
||||
|
||||
@@ -105,13 +105,13 @@ Match the request to one or more of these primary categories:
|
||||
|
||||
**SECURITY**: analyze_email_headers, analyze_incident, analyze_logs, analyze_malware, analyze_risk, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, ask_secure_by_design_questions, create_command, create_cyber_summary, create_graph_from_input, create_investigation_visualization, create_network_threat_landscape, create_report_finding, create_security_update, create_sigma_rules, create_stride_threat_model, create_threat_scenarios, create_ttrc_graph, create_ttrc_narrative, extract_ctf_writeup, improve_report_finding, recommend_pipeline_upgrades, review_code, t_red_team_thinking, t_threat_model_plans, write_hackerone_report, write_nuclei_template_rule, write_semgrep_rule
|
||||
|
||||
**SELF**: create_better_frame, create_diy, create_reading_plan, dialog_with_socrates, extract_article_wisdom, extract_book_ideas, extract_book_recommendations, extract_insights, extract_insights_dm, extract_most_redeeming_thing, extract_recipe, extract_recommendations, extract_song_meaning, extract_wisdom, extract_wisdom_dm, extract_wisdom_short, find_female_life_partner, provide_guidance, t_check_dunning_kruger, t_create_h3_career, t_describe_life_outlook, t_find_neglected_goals, t_give_encouragement
**SELF**: analyze_mistakes, analyze_personality, analyze_spiritual_text, create_better_frame, create_diy, create_reading_plan, create_story_about_person, dialog_with_socrates, extract_article_wisdom, extract_book_ideas, extract_book_recommendations, extract_insights, extract_insights_dm, extract_most_redeeming_thing, extract_recipe, extract_recommendations, extract_song_meaning, extract_wisdom, extract_wisdom_dm, extract_wisdom_short, find_female_life_partner, heal_person, provide_guidance, recommend_artists, t_check_dunning_kruger, t_create_h3_career, t_describe_life_outlook, t_find_neglected_goals, t_give_encouragement

**STRATEGY**: analyze_military_strategy, create_better_frame, prepare_7s_strategy, t_analyze_challenge_handling, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking, t_threat_model_plans, t_visualize_mission_goals_projects

**SUMMARIZE**: capture_thinkers_work, create_5_sentence_summary, create_micro_summary, create_newsletter_entry, create_show_intro, create_summary, extract_core_message, extract_latest_video, extract_main_idea, summarize, summarize_board_meeting, summarize_debate, summarize_git_changes, summarize_git_diff, summarize_lecture, summarize_legislation, summarize_meeting, summarize_micro, summarize_newsletter, summarize_paper, summarize_pull-requests, summarize_rpg_session, youtube_summary

**VISUALIZE**: create_excalidraw_visualization, create_graph_from_input, create_idea_compass, create_investigation_visualization, create_keynote, create_logo, create_markmap_visualization, create_mermaid_visualization, create_mermaid_visualization_for_github, create_video_chapters, create_visualization, enrich_blog_post, show_fabric_options_markmap, t_visualize_mission_goals_projects
**VISUALIZE**: create_excalidraw_visualization, create_graph_from_input, create_idea_compass, create_investigation_visualization, create_keynote, create_logo, create_markmap_visualization, create_mermaid_visualization, create_mermaid_visualization_for_github, create_video_chapters, create_visualization, enrich_blog_post, t_visualize_mission_goals_projects

**WISDOM**: extract_alpha, extract_article_wisdom, extract_book_ideas, extract_insights, extract_most_redeeming_thing, extract_recommendations, extract_wisdom, extract_wisdom_dm, extract_wisdom_nometa, extract_wisdom_short

@@ -78,10 +78,6 @@ Assess AI outputs against criteria, providing scores and feedback.

Process direct queries by interpreting intent.

### solve_with_cot

Solve problems using chain-of-thought reasoning.

### suggest_pattern

Recommend Fabric patterns based on user requirements.

@@ -904,10 +900,6 @@ Create Mermaid diagrams to visualize workflows in documentation.

Transform concepts to ASCII art with explanations of relationships.

### show_fabric_options_markmap

Visualize Fabric capabilities using Markmap syntax.

### t_visualize_mission_goals_projects

Visualize missions and goals to clarify relationships.

@@ -942,6 +934,10 @@ Identify neglected goals to surface opportunities.

## PERSONAL DEVELOPMENT PATTERNS

### create_story_about_person

Infer everyday challenges and realistic coping strategies from a psychological profile and craft an empathetic 500–700-word story consistent with the character.

### extract_recipe

Extract/format recipes into instructions with ingredients and steps.

@@ -950,6 +946,10 @@ Extract/format recipes into instructions with ingredients and steps.

Clarify and summarize partner criteria in direct language.

### heal_person

Analyze a psychological profile, pinpoint issues and strengths, and deliver compassionate, structured strategies for spiritual, mental, and life improvement.

## CREATIVITY PATTERNS

### create_mnemonic_phrases
26
docs/CODE_OF_CONDUCT.md
Normal file
@@ -0,0 +1,26 @@
# Code of Conduct

## Our Expectation

We expect all contributors and community members to act with basic human decency and common sense.

This project exists to help people augment their capabilities with AI, and we welcome contributions from anyone who shares this mission. We assume good faith and trust that everyone involved is here to build something valuable together.

## Guidelines

- **Be respectful**: Treat others as you'd want to be treated in a professional setting
- **Be constructive**: Focus on the work and help make the project better
- **Be collaborative**: We're all working toward the same goal - making Fabric more useful
- **Use good judgment**: If you're not sure whether something is appropriate, it probably isn't

## Reporting Issues

If someone is being genuinely disruptive or harmful, please email the maintainers directly. We'll address legitimate concerns promptly and fairly.

## Enforcement

Maintainers reserve the right to remove content and restrict access for anyone who consistently acts in bad faith or disrupts the community.

---

*This project assumes contributors are adults who can work together professionally. If you can't do that, this isn't the right place for you.*
154
docs/CONTRIBUTING.md
Normal file
@@ -0,0 +1,154 @@
# Contributing to Fabric

Thanks for contributing to Fabric! Here's what you need to know to get started quickly.

## Quick Setup

### Prerequisites

- Go 1.24+ installed
- Git configured with your details
- GitHub CLI (`gh`)

### Getting Started

```bash
# Clone your fork (upstream is set automatically)
gh repo clone YOUR_GITHUB_USER/fabric
cd fabric
go build -o fabric ./cmd/fabric
./fabric --setup

# Run tests
go test ./...
```

## Development Guidelines

### Code Style

- Follow standard Go conventions (`gofmt`, `golint`)
- Use meaningful variable and function names
- Write tests for new functionality
- Keep functions focused and small

### Commit Messages

Use descriptive commit messages:

```text
feat: add new pattern for code analysis
fix: resolve OAuth token refresh issue
docs: update installation instructions
```

### Project Structure

- `cmd/` - Executable commands
- `internal/` - Private application code
- `data/patterns/` - AI patterns
- `docs/` - Documentation

## Pull Request Process

### Changelog Generation (REQUIRED)

After opening your PR, generate a changelog entry:

```bash
go run ./cmd/generate_changelog --ai-summarize --incoming-pr YOUR_PR_NUMBER
```

**Requirements:**

- PR must be open and mergeable
- Working directory must be clean
- GitHub token available (GITHUB_TOKEN env var)

**Optional flags:**

- `--ai-summarize` - Enhanced AI-generated summaries
- `--push` - Auto-push the changelog commit
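For a complete picture, here is a hedged end-to-end sketch of the workflow above; the PR number and token value are placeholders, and the flags are exactly the ones documented in this section.

```bash
# Hypothetical run for PR #1234 -- substitute your own PR number and token
export GITHUB_TOKEN="<token with access to the repository>"
git status                      # confirm the working directory is clean
go run ./cmd/generate_changelog --ai-summarize --push --incoming-pr 1234
```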
### PR Guidelines

1. Fork the repository
2. Create a feature branch
3. Make your changes
4. Write/update tests
5. Generate changelog entry (see above)
6. Submit PR with clear description

### Review Process

- PRs require maintainer review
- Address feedback promptly
- Keep PRs focused on single features/fixes
- Update changelog if you make significant changes

## Testing

### Run Tests

```bash
# All tests
go test ./...

# Specific package
go test ./internal/cli

# With coverage
go test -cover ./...
```

### Test Requirements

- Unit tests for core functionality
- Integration tests for external dependencies
- Examples in documentation

## Patterns

### Creating Patterns

Patterns go in `data/patterns/[pattern-name]/system.md`:

```markdown
# IDENTITY and PURPOSE
You are an expert at...

# STEPS
- Step 1
- Step 2

# OUTPUT
- Output format requirements

# EXAMPLE
Example output here
```

### Pattern Guidelines

- Use clear, actionable language
- Provide specific output formats
- Include examples when helpful
- Test with multiple AI providers
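Before opening the PR it can help to smoke-test the pattern through the CLI. The sketch below is assumption-laden: the pattern name is made up, and `~/.config/fabric/patterns` is used here as the conventional local pattern store, which may differ on your setup.

```bash
# Copy the new pattern where your local fabric install can find it (path is an assumption)
cp -r data/patterns/my_new_pattern ~/.config/fabric/patterns/

# Inspect the assembled prompt without calling a model, then do a real run
echo "sample input" | fabric --pattern my_new_pattern --dry-run
echo "sample input" | fabric --pattern my_new_pattern
```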
## Documentation

- Update README.md for new features
- Add docs to `docs/` for complex features
- Include usage examples
- Keep documentation current

## Getting Help

- Check existing issues first
- Ask questions in discussions
- Tag maintainers for urgent issues
- Be patient - maintainers are volunteers

## License

By contributing, you agree your contributions will be licensed under the MIT License.
88
docs/README.md
Normal file
@@ -0,0 +1,88 @@
# Fabric Documentation

Welcome to the Fabric documentation! This directory contains detailed guides and technical documentation for various features and components of Fabric.

## 📚 Available Documentation

### Core Features

**[Automated-Changelog-Usage.md](./Automated-Changelog-Usage.md)**
Complete guide for developers on using the automated changelog system. Covers the workflow for generating PR changelog entries during development, including setup, validation, and CI/CD integration.

**[YouTube-Processing.md](./YouTube-Processing.md)**
Comprehensive guide for processing YouTube videos and playlists with Fabric. Covers transcript extraction, comment processing, metadata retrieval, and advanced yt-dlp configurations.

**[Using-Speech-To-Text.md](./Using-Speech-To-Text.md)**
Documentation for Fabric's speech-to-text capabilities using OpenAI's Whisper models. Learn how to transcribe audio and video files and process them through Fabric patterns.

### User Interface & Experience

**[Desktop-Notifications.md](./Desktop-Notifications.md)**
Guide to setting up desktop notifications for Fabric commands. Useful for long-running tasks and multitasking scenarios with cross-platform notification support.

**[Shell-Completions.md](./Shell-Completions.md)**
Instructions for setting up intelligent tab completion for Fabric in Zsh, Bash, and Fish shells. Includes automated installation and manual setup options.

**[Gemini-TTS.md](./Gemini-TTS.md)**
Complete guide for using Google Gemini's text-to-speech features with Fabric. Covers voice selection, audio generation, and integration with Fabric patterns.

### Development & Architecture

**[Automated-ChangeLog.md](./Automated-ChangeLog.md)**
Technical documentation outlining the automated CHANGELOG system architecture for CI/CD integration. Details the infrastructure and workflow for maintainers.

**[Project-Restructured.md](./Project-Restructured.md)**
Project restructuring plan and architectural decisions. Documents the transition to standard Go conventions and project organization improvements.

**[NOTES.md](./NOTES.md)**
Development notes on refactoring efforts, model management improvements, and architectural changes. Includes technical details on vendor and model abstraction.

### Audio Resources

**[voices/README.md](./voices/README.md)**
Index of Gemini TTS voice samples demonstrating different AI voice characteristics available in Fabric.

## 🗂️ Additional Resources

### Configuration Files

- `./notification-config.yaml` - Example notification configuration

### Images

- `images/` - Screenshots and visual documentation assets
- `fabric-logo-gif.gif` - Animated Fabric logo
- `fabric-summarize.png` - Screenshot of summarization feature
- `svelte-preview.png` - Web interface preview

## 🚀 Quick Start

New to Fabric? Start with these essential docs:

1. **[../README.md](../README.md)** - Main project README with installation and basic usage
2. **[Shell-Completions.md](./Shell-Completions.md)** - Set up tab completion for better CLI experience
3. **[YouTube-Processing.md](./YouTube-Processing.md)** - Learn one of Fabric's most popular features
4. **[Desktop-Notifications.md](./Desktop-Notifications.md)** - Get notified when long tasks complete

## 🔧 For Contributors

Contributing to Fabric? These docs are essential:

1. **[./CONTRIBUTING.md](./CONTRIBUTING.md)** - Contribution guidelines and setup
2. **[Automated-Changelog-Usage.md](./Automated-Changelog-Usage.md)** - Required workflow for PR submissions
3. **[Project-Restructured.md](./Project-Restructured.md)** - Understanding project architecture
4. **[NOTES.md](./NOTES.md)** - Current development priorities and patterns

## 📝 Documentation Standards

When adding new documentation:

- Use clear, descriptive filenames
- Include practical examples and use cases
- Update this README index with your new docs
- Follow the established markdown formatting conventions
- Test all code examples before publication

---

*For general help and support, see [./SUPPORT.md](./SUPPORT.md)*
158
docs/SECURITY.md
Normal file
@@ -0,0 +1,158 @@
# Security Policy

## Supported Versions

We aim to provide security updates for the latest version of Fabric.

We recommend always using the latest version of Fabric for security fixes and improvements.

## Reporting Security Vulnerabilities

**Please DO NOT report security vulnerabilities through public GitHub issues.**

### Preferred Reporting Method

Send security reports directly to: **<kayvan@sylvan.com>** and CC to the project maintainer at **<daniel@danielmiessler.com>**

### What to Include

Please provide the following information:

1. **Vulnerability Type**: What kind of security issue (e.g., injection, authentication bypass, etc.)
2. **Affected Components**: Which parts of Fabric are affected
3. **Impact Assessment**: What could an attacker accomplish
4. **Reproduction Steps**: Clear steps to reproduce the vulnerability
5. **Proposed Fix**: If you have suggestions for remediation
6. **Disclosure Timeline**: Your preferred timeline for public disclosure

### Example Report Format

```text
Subject: [SECURITY] Brief description of vulnerability

Vulnerability Type: SQL Injection
Affected Component: Pattern database queries
Impact: Potential data exposure
Severity: High

Reproduction Steps:
1. Navigate to...
2. Submit payload: ...
3. Observe...

Evidence:
[Screenshots, logs, or proof of concept]

Suggested Fix:
Use parameterized queries instead of string concatenation...
```

## Security Considerations

### API Keys and Secrets

- Never commit API keys to the repository
- Store secrets in environment variables or secure configuration
- Use the built-in setup process for key management
- Regularly rotate API keys
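A minimal sketch of what this looks like in practice, assuming the default `~/.config/fabric/.env` location created by the setup process; the permission bits and the variable name are common conventions, not requirements:

```bash
# Restrict access to the Fabric config directory and its secrets file
chmod 700 ~/.config/fabric
chmod 600 ~/.config/fabric/.env

# Prefer environment variables over keys committed to files in the repo, e.g.:
export OPENAI_API_KEY="sk-..."   # placeholder value
```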
### Input Validation

- All user inputs are validated before processing
- Special attention to pattern definitions and user content
- URL validation for web scraping features

### AI Provider Integration

- Secure communication with AI providers (HTTPS/TLS)
- Token handling follows provider best practices
- No sensitive data logged or cached unencrypted

### Network Security

- Web server endpoints properly authenticated when required
- CORS policies appropriately configured
- Rate limiting implemented where necessary

## Vulnerability Response Process

1. **Report Received**: We'll acknowledge receipt within 24 hours
2. **Initial Assessment**: We'll evaluate severity and impact within 72 hours
3. **Investigation**: We'll investigate and develop fixes
4. **Fix Development**: We'll create and test patches
5. **Coordinated Disclosure**: We'll work with reporter on disclosure timeline
6. **Release**: We'll release patched version with security advisory

### Timeline Expectations

- **Critical**: 1-7 days
- **High**: 7-30 days
- **Medium**: 30-90 days
- **Low**: Next scheduled release

## Bug Bounty

We don't currently offer a formal bug bounty program, but we deeply appreciate security research and will:

- Acknowledge contributors in release notes
- Provide credit in security advisories
- Consider swag or small rewards for significant findings

## Security Best Practices for Users

### Installation

- Download Fabric only from official sources
- Verify checksums when available
- Keep installations up to date

### Configuration

- Use strong, unique API keys
- Don't share configuration files containing secrets
- Set appropriate file permissions on config directories

### Usage

- Be cautious with patterns that process sensitive data
- Review AI provider terms for data handling
- Consider using local models for sensitive content

## Known Security Limitations

### AI Provider Dependencies

Fabric relies on external AI providers. Security depends partly on:

- Provider security practices
- Data transmission security
- Provider data handling policies

### Pattern Execution

Custom patterns could potentially:

- Process sensitive inputs inappropriately
- Generate outputs containing sensitive information
- Be used for adversarial prompt injection

**Recommendation**: Review patterns carefully, especially those from untrusted sources.

## Security Updates

Security updates are distributed through:

- GitHub Releases with security tags
- Security advisories on GitHub
- Project documentation updates

Subscribe to the repository to receive notifications about security updates.

## Contact

For non-security issues, please use GitHub issues.
For security concerns, email: **<kayvan@sylvan.com>** and CC to **<daniel@danielmiessler.com>**

---

*We take security seriously and appreciate the security research community's help in keeping Fabric secure.*
148
docs/SUPPORT.md
Normal file
@@ -0,0 +1,148 @@
# Support

## Getting Help with Fabric

Need help with Fabric? Here are the best ways to get assistance:

## 📖 Documentation First

Before reaching out, check these resources:

- **[README.md](../README.md)** - Installation, usage, and examples
- **[docs/](./README.md)** - Detailed documentation
- **[Patterns](../data/patterns/)** - Browse available AI patterns

## 🐛 Bug Reports

Found a bug? Please create an issue:

**[Report a Bug](https://github.com/danielmiessler/fabric/issues/new?template=bug.yml)**

Include:

- Fabric version (`fabric --version`)
- Operating system
- Steps to reproduce
- Expected vs actual behavior
- Error messages/logs

## 💡 Feature Requests

Have an idea for Fabric? We'd love to hear it:

**[Request a Feature](https://github.com/danielmiessler/fabric/issues/new)**

Describe:

- What you want to achieve
- Why it would be useful
- How you envision it working
- Any alternatives you've considered

## 🤔 Questions & Discussions

For general questions, usage help, or community discussion:

**[GitHub Discussions](https://github.com/danielmiessler/fabric/discussions)**

Great for:

- "How do I...?" questions
- Sharing patterns you've created
- Getting community advice
- Feature brainstorming

## 🏷️ Issue Labels

When creating issues, maintainers will add appropriate labels:

- `bug` - Something isn't working
- `enhancement` - New feature request
- `documentation` - Documentation improvements
- `help wanted` - Community contributions welcome
- `good first issue` - Great for new contributors
- `question` - General questions
- `pattern` - Related to AI patterns

## 📋 Issue Templates

We provide templates to help you create detailed reports:

- **Bug Report** - Structured bug reporting
- **Feature Request** - Detailed feature proposals
- **Pattern Submission** - New pattern contributions

## 🔒 Security Issues

**DO NOT create public issues for security vulnerabilities.**

See our [Security Policy](./SECURITY.md) for proper reporting procedures.

## ⚡ Response Times

We're a community-driven project with volunteer maintainers:

- **Bugs**: We aim to acknowledge within 48 hours
- **Features**: Response time varies based on complexity
- **Questions**: Community often responds quickly
- **Security**: See security policy for timelines

## 🛠️ Self-Help Tips

Before creating an issue, try:

1. **Update Fabric**: `go install github.com/danielmiessler/fabric/cmd/fabric@latest`
2. **Check existing issues**: Someone might have the same problem
3. **Run setup**: `fabric --setup` can fix configuration issues
4. **Test minimal example**: Isolate the problem

## 🤝 Community Guidelines

When asking for help:

- Be specific and provide context
- Include relevant details and error messages
- Be patient - maintainers are volunteers
- Help others when you can
- Say thanks when someone helps you

## 📞 Emergency Contact

For urgent security issues only:

- Email: <security@fabric.ai> (if available)
- Maintainer: <daniel@danielmiessler.com>

## 🎯 What We Can Help With

✅ **We can help with:**

- Installation and setup issues
- Usage questions and examples
- Bug reports and fixes
- Feature discussions
- Pattern creation guidance
- Integration questions

❌ **We cannot help with:**

- Custom development for your specific use case
- Troubleshooting your specific AI provider issues
- General AI or programming tutorials
- Commercial support agreements

## 💪 Contributing Back

The best way to get help is to help others:

- Answer questions in discussions
- Improve documentation
- Share useful patterns
- Report bugs clearly
- Review pull requests

See our [Contributing Guide](./CONTRIBUTING.md) for details.

---

*Remember: We're all here to make Fabric better. Be kind, be helpful, and let's build something amazing together!*
107
docs/contexts-and-sessions-tutorial.md
Normal file
@@ -0,0 +1,107 @@
# Contexts and Sessions in Fabric

Fabric uses **contexts** and **sessions** to manage conversation state and reusable prompt data. This guide focuses on how to use them from the CLI and REST API.

## What is a Context?

A context is named text that Fabric injects at the beginning of a conversation. Contexts live on disk under `~/.config/fabric/contexts`; each file name is the context name, and its contents are included as a system message.

Command-line helpers:

- `--context <name>` select a context
- `--listcontexts` list available contexts
- `--printcontext <name>` show the contents
- `--wipecontext <name>` delete it
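A minimal sketch that ties the helpers above together (the context name, its text, and the `summarize` pattern are just examples):

```bash
# Create a reusable context on disk
mkdir -p ~/.config/fabric/contexts
cat > ~/.config/fabric/contexts/writer <<'EOF'
Write in a concise, friendly tone and prefer short paragraphs.
EOF

fabric --listcontexts                                     # "writer" should now appear
echo "Draft a release note" | fabric --context writer --pattern summarize
fabric --printcontext writer                              # inspect the stored text
```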
## What is a Session?

A session tracks the message history of a conversation. When you specify a session name, Fabric loads any existing messages, appends new ones, and saves back to disk. Sessions are stored as JSON under `~/.config/fabric/sessions`.

Command-line helpers:

- `--session <name>` attach to a session
- `--listsessions` list stored sessions
- `--printsession <name>` print a session
- `--wipesession <name>` delete it
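A short sketch of the session helpers above in action (the session name and prompts are examples):

```bash
# Reuse one session across runs so Fabric remembers earlier exchanges
echo "My project is a CLI that summarizes videos" | fabric --session mychat
echo "Suggest three names for it" | fabric --session mychat   # builds on the first exchange

fabric --listsessions
fabric --printsession mychat
fabric --wipesession mychat                                   # start over
```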
## Everyday Use Cases

Contexts and sessions serve different everyday needs:

- **Context** – Reuse prompt text such as preferred style, domain knowledge, or instructions for the assistant.
- **Session** – Maintain ongoing conversation history so Fabric remembers earlier exchanges.

Example workflow:

1. Create a context file manually in `~/.config/fabric/contexts/writer` with your writing guidelines.
2. Start a session while chatting to build on previous answers (`fabric --session mychat`). Sessions are automatically created if they don't exist.

## How Contexts and Sessions Interact

When Fabric handles a chat request, it loads any named context, combines it with pattern text, and adds the result as a system message before sending the conversation history to the model. The assistant's reply is appended to the session so future calls continue from the same state.

## REST API Endpoints

The REST server exposes CRUD endpoints for managing contexts and sessions:

- `/contexts/:name` – get or save a context
- `/contexts/names` – list available contexts
- `/sessions/:name` – get or save a session
- `/sessions/names` – list available sessions
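A hedged sketch of calling these endpoints with `curl`, assuming the server was started with `fabric --serve` and is listening on `localhost:8080`; the port and the exact HTTP verbs for saving are assumptions, so check your server configuration:

```bash
curl http://localhost:8080/contexts/names      # list contexts
curl http://localhost:8080/contexts/writer     # fetch one context
curl http://localhost:8080/sessions/names      # list sessions
curl http://localhost:8080/sessions/mychat     # fetch one session
```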
## Summary

Contexts provide reusable system-level instructions, while sessions maintain conversation history. Together they allow Fabric to build rich, stateful interactions with language models.

## For Developers

### Loading Contexts from Disk

```go
// internal/plugins/db/fsdb/contexts.go
func (o *ContextsEntity) Get(name string) (*Context, error) {
	content, err := o.Load(name)
	if err != nil {
		return nil, err
	}
	return &Context{Name: name, Content: string(content)}, nil
}
```

### Handling Sessions

```go
// internal/plugins/db/fsdb/sessions.go
type Session struct {
	Name     string
	Messages []*chat.ChatCompletionMessage
}

func (o *SessionsEntity) Get(name string) (*Session, error) {
	session := &Session{Name: name}
	var err error
	if o.Exists(name) {
		err = o.LoadAsJson(name, &session.Messages)
	} else {
		fmt.Printf("Creating new session: %s\n", name)
	}
	return session, err
}
```

### Building a Session

```go
// internal/core/chatter.go
if request.ContextName != "" {
	ctx, err := o.db.Contexts.Get(request.ContextName)
	if err != nil {
		return nil, fmt.Errorf("could not find context %s: %v", request.ContextName, err)
	}
	contextContent = ctx.Content
}

systemMessage := strings.TrimSpace(contextContent) + strings.TrimSpace(patternContent)
if systemMessage != "" {
	session.Append(&chat.ChatCompletionMessage{Role: chat.ChatMessageRoleSystem, Content: systemMessage})
}
```
182
docs/i18n.md
Normal file
@@ -0,0 +1,182 @@
# Internationalization (i18n) in Fabric

Fabric supports multiple languages through its internationalization system. The system automatically detects your preferred language from environment variables and provides localized messages.

## How Locale Detection Works

Fabric follows POSIX standards for locale detection with the following priority order:

1. **Explicit language flag**: `--language` or `-g` (highest priority)
2. **LC_ALL**: Complete locale override environment variable
3. **LC_MESSAGES**: Messages-specific locale environment variable
4. **LANG**: General locale environment variable
5. **Default fallback**: English (`en`) if none are set or valid

### Examples

```bash
# Use explicit language flag
fabric --language es --pattern summarize

# Use LC_ALL environment variable
LC_ALL=fr_FR.UTF-8 fabric --pattern summarize

# Use LANG environment variable
LANG=de_DE.UTF-8 fabric --pattern summarize

# Multiple environment variables (LC_ALL takes priority)
LC_ALL=es_ES.UTF-8 LANG=fr_FR.UTF-8 fabric --pattern summarize
# Uses Spanish (es_ES) because LC_ALL has higher priority
```

## Supported Locale Formats

The system automatically normalizes various locale formats:

- `en_US.UTF-8` → `en-US`
- `fr_FR@euro` → `fr-FR`
- `zh_CN.GB2312` → `zh-CN`
- `de_DE.UTF-8@traditional` → `de-DE`

Special cases:

- `C` or `POSIX` → treated as invalid, falls back to English
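The sketch below illustrates the kind of normalization described above; it is not Fabric's actual implementation, just a minimal example built on `golang.org/x/text/language`, which the implementation notes later in this document say is used for parsing:

```go
package main

import (
	"fmt"
	"strings"

	"golang.org/x/text/language"
)

// normalizeLocale strips encoding/modifier suffixes such as ".UTF-8" or "@euro",
// parses the remainder as a BCP 47 tag, and falls back to English for C/POSIX
// or unparseable values.
func normalizeLocale(raw string) string {
	base := raw
	if i := strings.IndexAny(base, ".@"); i >= 0 {
		base = base[:i]
	}
	if base == "" || base == "C" || base == "POSIX" {
		return "en"
	}
	tag, err := language.Parse(strings.ReplaceAll(base, "_", "-"))
	if err != nil {
		return "en"
	}
	return tag.String()
}

func main() {
	for _, l := range []string{"en_US.UTF-8", "fr_FR@euro", "zh_CN.GB2312", "C"} {
		fmt.Printf("%-16s -> %s\n", l, normalizeLocale(l))
	}
}
```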
## Translation File Locations

Translations are loaded from multiple sources in this order:

1. **Embedded files** (highest priority): Compiled into the binary
   - Location: `internal/i18n/locales/*.json`
   - Always available, no download required

2. **User config directory**: Downloaded on demand
   - Location: `~/.config/fabric/locales/`
   - Downloaded from GitHub when needed

3. **GitHub repository**: Source for downloads
   - URL: `https://raw.githubusercontent.com/danielmiessler/Fabric/main/internal/i18n/locales/`

## Currently Supported Languages

- **English** (`en`): Default language, always available
- **Spanish** (`es`): Available in embedded files

## Adding New Languages

To add support for a new language:

1. Create a new JSON file: `internal/i18n/locales/{lang}.json`
2. Add translations in the format:

```json
{
  "message_id": "localized message text"
}
```

3. Rebuild Fabric to embed the new translations

### Translation File Format

Translation files use JSON format with message IDs as keys:

```json
{
  "html_readability_error": "use original input, because can't apply html readability"
}
```

Spanish example:

```json
{
  "html_readability_error": "usa la entrada original, porque no se puede aplicar la legibilidad de html"
}
```
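This changeset adds `github.com/nicksnyder/go-i18n/v2` to `go.mod`, so as an illustration here is a small, self-contained sketch of loading a message file of this shape with that library; it is not a description of how Fabric wires the bundle internally:

```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/nicksnyder/go-i18n/v2/i18n"
	"golang.org/x/text/language"
)

func main() {
	// English is the fallback; message files use the flat "id": "text" format shown above.
	bundle := i18n.NewBundle(language.English)
	bundle.RegisterUnmarshalFunc("json", json.Unmarshal)
	bundle.MustParseMessageFileBytes([]byte(`{
  "html_readability_error": "usa la entrada original, porque no se puede aplicar la legibilidad de html"
}`), "es.json")

	localizer := i18n.NewLocalizer(bundle, "es", "en")
	fmt.Println(localizer.MustLocalize(&i18n.LocalizeConfig{MessageID: "html_readability_error"}))
}
```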
## Error Handling

The i18n system is designed to be robust:

- **Download failures**: Non-fatal, falls back to embedded translations
- **Invalid locales**: Skipped, next priority locale used
- **Missing translations**: Falls back to English
- **Missing files**: Uses embedded defaults

Error messages are logged to stderr but don't prevent operation.

## Environment Variable Examples

### Common Unix Locale Settings

```bash
# Set system-wide locale
export LANG=en_US.UTF-8

# Override all locale categories
export LC_ALL=fr_FR.UTF-8

# Set only message locale (for this session)
LC_MESSAGES=es_ES.UTF-8 fabric --pattern summarize

# Check current locale settings
locale
```

### Testing Locale Detection

You can test locale detection without changing your system settings:

```bash
# Test with French
LC_ALL=fr_FR.UTF-8 fabric --version

# Test with Spanish (if available)
LC_ALL=es_ES.UTF-8 fabric --version

# Test with German (will download if available)
LC_ALL=de_DE.UTF-8 fabric --version
```

## Troubleshooting

### "i18n download failed" messages

This is normal when requesting a language not yet available. The system will fall back to English.

### Locale not detected

Check your environment variables:

```bash
echo $LC_ALL
echo $LC_MESSAGES
echo $LANG
```

Ensure they're in a valid format like `en_US.UTF-8` or `fr_FR`.

### Wrong language used

Remember the priority order:

1. `--language` flag overrides everything
2. `LC_ALL` overrides `LC_MESSAGES` and `LANG`
3. `LC_MESSAGES` overrides `LANG`
## Implementation Details

The locale detection system:

- Uses `golang.org/x/text/language` for parsing and validation
- Follows BCP 47 language tag standards
- Implements POSIX locale environment variable precedence
- Provides comprehensive test coverage
- Handles edge cases gracefully
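As an illustration of the precedence bullet above, here is a simplified sketch; the real logic lives in `internal/i18n/locale.go` and additionally validates values as BCP 47 tags:

```go
package main

import (
	"fmt"
	"os"
)

// resolveLocale mirrors the documented order: explicit flag, then LC_ALL,
// LC_MESSAGES, LANG, and finally the English fallback. C/POSIX are ignored.
func resolveLocale(explicitFlag string) string {
	if explicitFlag != "" {
		return explicitFlag
	}
	for _, env := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
		if v := os.Getenv(env); v != "" && v != "C" && v != "POSIX" {
			return v
		}
	}
	return "en"
}

func main() {
	fmt.Println(resolveLocale("")) // prints the environment-derived locale or "en"
}
```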
For developers working on the codebase, see the implementation in:

- `internal/i18n/locale.go`: Locale detection logic
- `internal/i18n/i18n.go`: Main i18n initialization
- `internal/i18n/locale_test.go`: Test suite
3
go.mod
@@ -21,7 +21,8 @@ require (
	github.com/joho/godotenv v1.5.1
	github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
	github.com/mattn/go-sqlite3 v1.14.28
	github.com/ollama/ollama v0.9.0
	github.com/nicksnyder/go-i18n/v2 v2.6.0
	github.com/ollama/ollama v0.11.7
	github.com/openai/openai-go v1.8.2
	github.com/otiai10/copy v1.14.1
	github.com/pkg/errors v0.9.1
8
go.sum
@@ -8,6 +8,8 @@ cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeO
|
||||
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
|
||||
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
|
||||
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
|
||||
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
|
||||
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
|
||||
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
|
||||
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
|
||||
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
|
||||
@@ -180,8 +182,10 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/ollama/ollama v0.9.0 h1:GvdGhi8G/QMnFrY0TMLDy1bXua+Ify8KTkFe4ZY/OZs=
|
||||
github.com/ollama/ollama v0.9.0/go.mod h1:aio9yQ7nc4uwIbn6S0LkGEPgn8/9bNQLL1nHuH+OcD0=
|
||||
github.com/nicksnyder/go-i18n/v2 v2.6.0 h1:C/m2NNWNiTB6SK4Ao8df5EWm3JETSTIGNXBpMJTxzxQ=
|
||||
github.com/nicksnyder/go-i18n/v2 v2.6.0/go.mod h1:88sRqr0C6OPyJn0/KRNaEz1uWorjxIKP7rUUcvycecE=
|
||||
github.com/ollama/ollama v0.11.7 h1:CuYjaJ/YEnvLDpJocJbbVdpdVFyGA/OP6lKFyzZD4dI=
|
||||
github.com/ollama/ollama v0.11.7/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms=
|
||||
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
|
||||
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
|
||||
github.com/openai/openai-go v1.8.2 h1:UqSkJ1vCOPUpz9Ka5tS0324EJFEuOvMc+lA/EarJWP8=
|
||||
|
||||
@@ -9,6 +9,8 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
"github.com/danielmiessler/fabric/internal/tools/notifications"
|
||||
)
|
||||
@@ -57,12 +59,12 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
isTTSModel := isTTSModel(currentFlags.Model)
|
||||
|
||||
if isTTSModel && !isAudioOutput {
|
||||
err = fmt.Errorf("TTS model '%s' requires audio output. Please specify an audio output file with -o flag (e.g., -o output.wav)", currentFlags.Model)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("tts_model_requires_audio_output"), currentFlags.Model))
|
||||
return
|
||||
}
|
||||
|
||||
if isAudioOutput && !isTTSModel {
|
||||
err = fmt.Errorf("audio output file '%s' specified but model '%s' is not a TTS model. Please use a TTS model like gemini-2.5-flash-preview-tts", currentFlags.Output, currentFlags.Model)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("audio_output_file_specified_but_not_tts_model"), currentFlags.Output, currentFlags.Model))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -74,7 +76,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
outputFile += ".wav"
|
||||
}
|
||||
if _, err = os.Stat(outputFile); err == nil {
|
||||
err = fmt.Errorf("file %s already exists. Please choose a different filename or remove the existing file", outputFile)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("file_already_exists_choose_different"), outputFile))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -94,7 +96,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
if !currentFlags.Stream || currentFlags.SuppressThink {
|
||||
// For TTS models with audio output, show a user-friendly message instead of raw data
|
||||
if isTTSModel && isAudioOutput && strings.HasPrefix(result, "FABRIC_AUDIO_DATA:") {
|
||||
fmt.Printf("TTS audio generated successfully and saved to: %s\n", currentFlags.Output)
|
||||
fmt.Printf(i18n.T("tts_audio_generated_successfully"), currentFlags.Output)
|
||||
} else {
|
||||
// print the result if it was not streamed already or suppress-think disabled streaming output
|
||||
fmt.Println(result)
|
||||
@@ -135,7 +137,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
if chatOptions.Notification {
|
||||
if err = sendNotification(chatOptions, chatReq.PatternName, result); err != nil {
|
||||
// Log notification error but don't fail the main command
|
||||
fmt.Fprintf(os.Stderr, "Failed to send notification: %v\n", err)
|
||||
debuglog.Log("Failed to send notification: %v\n", err)
|
||||
}
|
||||
}
|
||||
|
||||
@@ -148,20 +150,20 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
// not grapheme clusters. As a result, complex emoji or accented characters with multiple combining
|
||||
// characters may be truncated improperly. This is a limitation of the current implementation.
|
||||
func sendNotification(options *domain.ChatOptions, patternName, result string) error {
|
||||
title := "Fabric Command Complete"
|
||||
title := i18n.T("fabric_command_complete")
|
||||
if patternName != "" {
|
||||
title = fmt.Sprintf("Fabric: %s Complete", patternName)
|
||||
title = fmt.Sprintf(i18n.T("fabric_command_complete_with_pattern"), patternName)
|
||||
}
|
||||
|
||||
// Limit message length for notification display (counts Unicode code points)
|
||||
message := "Command completed successfully"
|
||||
message := i18n.T("command_completed_successfully")
|
||||
if result != "" {
|
||||
maxLength := 100
|
||||
runes := []rune(result)
|
||||
if len(runes) > maxLength {
|
||||
message = fmt.Sprintf("Output: %s...", string(runes[:maxLength]))
|
||||
message = fmt.Sprintf(i18n.T("output_truncated"), string(runes[:maxLength]))
|
||||
} else {
|
||||
message = fmt.Sprintf("Output: %s", result)
|
||||
message = fmt.Sprintf(i18n.T("output_full"), result)
|
||||
}
|
||||
// Clean up newlines for notification display
|
||||
message = strings.ReplaceAll(message, "\n", " ")
|
||||
@@ -183,7 +185,7 @@ func sendNotification(options *domain.ChatOptions, patternName, result string) e
|
||||
// Use built-in notification system
|
||||
notificationManager := notifications.NewNotificationManager()
|
||||
if !notificationManager.IsAvailable() {
|
||||
return fmt.Errorf("no notification system available")
|
||||
return fmt.Errorf("%s", i18n.T("no_notification_system_available"))
|
||||
}
|
||||
|
||||
return notificationManager.Send(title, message)
|
||||
|
||||
@@ -3,10 +3,11 @@ package cli
|
||||
import (
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/openai"
|
||||
"github.com/danielmiessler/fabric/internal/tools/converter"
|
||||
"github.com/danielmiessler/fabric/internal/tools/youtube"
|
||||
@@ -19,6 +20,11 @@ func Cli(version string) (err error) {
|
||||
return
|
||||
}
|
||||
|
||||
// initialize internationalization using requested language
|
||||
if _, err = i18n.Init(currentFlags.Language); err != nil {
|
||||
return
|
||||
}
|
||||
|
||||
if currentFlags.Setup {
|
||||
if err = ensureEnvFile(); err != nil {
|
||||
return
|
||||
@@ -34,7 +40,7 @@ func Cli(version string) (err error) {
|
||||
var registry, err2 = initializeFabric()
|
||||
if err2 != nil {
|
||||
if !currentFlags.Setup {
|
||||
fmt.Fprintln(os.Stderr, err2.Error())
|
||||
debuglog.Log("%s\n", err2.Error())
|
||||
currentFlags.Setup = true
|
||||
}
|
||||
// Return early if registry is nil to prevent panics in subsequent handlers
|
||||
@@ -86,7 +92,7 @@ func Cli(version string) (err error) {
|
||||
// Process HTML readability if needed
|
||||
if currentFlags.HtmlReadability {
|
||||
if msg, cleanErr := converter.HtmlReadability(currentFlags.Message); cleanErr != nil {
|
||||
fmt.Println("use original input, because can't apply html readability", cleanErr)
|
||||
fmt.Println(i18n.T("html_readability_error"), cleanErr)
|
||||
} else {
|
||||
currentFlags.Message = msg
|
||||
}
|
||||
|
||||
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
"github.com/jessevdk/go-flags"
|
||||
@@ -146,9 +147,15 @@ func Init() (ret *Flags, err error) {
|
||||
|
||||
// Parse CLI flags first
|
||||
ret = &Flags{}
|
||||
parser := flags.NewParser(ret, flags.Default)
|
||||
parser := flags.NewParser(ret, flags.HelpFlag|flags.PassDoubleDash)
|
||||
|
||||
var args []string
|
||||
if args, err = parser.Parse(); err != nil {
|
||||
// Check if this is a help request and handle it with our custom help
|
||||
if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
|
||||
CustomHelpHandler(parser, os.Stdout)
|
||||
os.Exit(0)
|
||||
}
|
||||
return
|
||||
}
|
||||
debuglog.SetLevel(debuglog.LevelFromInt(ret.Debug))
|
||||
@@ -275,30 +282,30 @@ func assignWithConversion(targetField, sourceField reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("cannot convert string %q to %v", str, targetField.Kind())
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("cannot_convert_string"), str, targetField.Kind()))
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported conversion from %v to %v", sourceField.Kind(), targetField.Kind())
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("unsupported_conversion"), sourceField.Kind(), targetField.Kind()))
|
||||
}
|
||||
|
||||
func loadYAMLConfig(configPath string) (*Flags, error) {
|
||||
absPath, err := util.GetAbsolutePath(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid config path: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_config_path"), err))
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(absPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("config file not found: %s", absPath)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("config_file_not_found"), absPath))
|
||||
}
|
||||
return nil, fmt.Errorf("error reading config file: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_reading_config_file"), err))
|
||||
}
|
||||
|
||||
// Use the existing Flags struct for YAML unmarshal
|
||||
config := &Flags{}
|
||||
if err := yaml.Unmarshal(data, config); err != nil {
|
||||
return nil, fmt.Errorf("error parsing config file: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_parsing_config_file"), err))
|
||||
}
|
||||
|
||||
debuglog.Debug(debuglog.Detailed, "Config: %v\n", config)
|
||||
@@ -316,7 +323,7 @@ func readStdin() (ret string, err error) {
|
||||
sb.WriteString(line)
|
||||
break
|
||||
}
|
||||
err = fmt.Errorf("error reading piped message from stdin: %w", readErr)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_reading_piped_message"), readErr))
|
||||
return
|
||||
} else {
|
||||
sb.WriteString(line)
|
||||
@@ -334,7 +341,7 @@ func validateImageFile(imagePath string) error {
|
||||
|
||||
// Check if file already exists
|
||||
if _, err := os.Stat(imagePath); err == nil {
|
||||
return fmt.Errorf("image file already exists: %s", imagePath)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_file_already_exists"), imagePath))
|
||||
}
|
||||
|
||||
// Check file extension
|
||||
@@ -347,7 +354,7 @@ func validateImageFile(imagePath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid image file extension '%s'. Supported formats: .png, .jpeg, .jpg, .webp", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_file_extension"), ext))
|
||||
}
|
||||
|
||||
// validateImageParameters validates image generation parameters
|
||||
@@ -355,7 +362,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
if imagePath == "" {
|
||||
// Check if any image parameters are specified without --image-file
|
||||
if size != "" || quality != "" || background != "" || compression != 0 {
|
||||
return fmt.Errorf("image parameters (--image-size, --image-quality, --image-background, --image-compression) can only be used with --image-file")
|
||||
return fmt.Errorf("%s", i18n.T("image_parameters_require_image_file"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -371,7 +378,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image size '%s'. Supported sizes: 1024x1024, 1536x1024, 1024x1536, auto", size)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_size"), size))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -386,7 +393,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image quality '%s'. Supported qualities: low, medium, high, auto", quality)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_quality"), quality))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -401,7 +408,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image background '%s'. Supported backgrounds: opaque, transparent", background)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_background"), background))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -411,17 +418,17 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
// Validate compression (only for jpeg/webp)
|
||||
if compression != 0 { // 0 means not set
|
||||
if ext != ".jpg" && ext != ".jpeg" && ext != ".webp" {
|
||||
return fmt.Errorf("image compression can only be used with JPEG and WebP formats, not %s", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_compression_jpeg_webp_only"), ext))
|
||||
}
|
||||
if compression < 0 || compression > 100 {
|
||||
return fmt.Errorf("image compression must be between 0 and 100, got %d", compression)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_compression_range_error"), compression))
|
||||
}
|
||||
}
|
||||
|
||||
// Validate background transparency (only for png/webp)
|
||||
if background == "transparent" {
|
||||
if ext != ".png" && ext != ".webp" {
|
||||
return fmt.Errorf("transparent background can only be used with PNG and WebP formats, not %s", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("transparent_background_png_webp_only"), ext))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
286
internal/cli/help.go
Normal file
@@ -0,0 +1,286 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/jessevdk/go-flags"
|
||||
)
|
||||
|
||||
// flagDescriptionMap maps flag names to their i18n keys
|
||||
var flagDescriptionMap = map[string]string{
|
||||
"pattern": "choose_pattern_from_available",
|
||||
"variable": "pattern_variables_help",
|
||||
"context": "choose_context_from_available",
|
||||
"session": "choose_session_from_available",
|
||||
"attachment": "attachment_path_or_url_help",
|
||||
"setup": "run_setup_for_reconfigurable_parts",
|
||||
"temperature": "set_temperature",
|
||||
"topp": "set_top_p",
|
||||
"stream": "stream_help",
|
||||
"presencepenalty": "set_presence_penalty",
|
||||
"raw": "use_model_defaults_raw_help",
|
||||
"frequencypenalty": "set_frequency_penalty",
|
||||
"listpatterns": "list_all_patterns",
|
||||
"listmodels": "list_all_available_models",
|
||||
"listcontexts": "list_all_contexts",
|
||||
"listsessions": "list_all_sessions",
|
||||
"updatepatterns": "update_patterns",
|
||||
"copy": "copy_to_clipboard",
|
||||
"model": "choose_model",
|
||||
"vendor": "specify_vendor_for_model",
|
||||
"modelContextLength": "model_context_length_ollama",
|
||||
"output": "output_to_file",
|
||||
"output-session": "output_entire_session",
|
||||
"latest": "number_of_latest_patterns",
|
||||
"changeDefaultModel": "change_default_model",
|
||||
"youtube": "youtube_url_help",
|
||||
"playlist": "prefer_playlist_over_video",
|
||||
"transcript": "grab_transcript_from_youtube",
|
||||
"transcript-with-timestamps": "grab_transcript_with_timestamps",
|
||||
"comments": "grab_comments_from_youtube",
|
||||
"metadata": "output_video_metadata",
|
||||
"yt-dlp-args": "additional_yt_dlp_args",
|
||||
"language": "specify_language_code",
|
||||
"scrape_url": "scrape_website_url",
|
||||
"scrape_question": "search_question_jina",
|
||||
"seed": "seed_for_lmm_generation",
|
||||
"wipecontext": "wipe_context",
|
||||
"wipesession": "wipe_session",
|
||||
"printcontext": "print_context",
|
||||
"printsession": "print_session",
|
||||
"readability": "convert_html_readability",
|
||||
"input-has-vars": "apply_variables_to_input",
|
||||
"no-variable-replacement": "disable_pattern_variable_replacement",
|
||||
"dry-run": "show_dry_run",
|
||||
"serve": "serve_fabric_rest_api",
|
||||
"serveOllama": "serve_fabric_api_ollama_endpoints",
|
||||
"address": "address_to_bind_rest_api",
|
||||
"api-key": "api_key_secure_server_routes",
|
||||
"config": "path_to_yaml_config",
|
||||
"version": "print_current_version",
|
||||
"listextensions": "list_all_registered_extensions",
|
||||
"addextension": "register_new_extension",
|
||||
"rmextension": "remove_registered_extension",
|
||||
"strategy": "choose_strategy_from_available",
|
||||
"liststrategies": "list_all_strategies",
|
||||
"listvendors": "list_all_vendors",
|
||||
"shell-complete-list": "output_raw_list_shell_completion",
|
||||
"search": "enable_web_search_tool",
|
||||
"search-location": "set_location_web_search",
|
||||
"image-file": "save_generated_image_to_file",
|
||||
"image-size": "image_dimensions_help",
|
||||
"image-quality": "image_quality_help",
|
||||
"image-compression": "compression_level_jpeg_webp",
|
||||
"image-background": "background_type_help",
|
||||
"suppress-think": "suppress_thinking_tags",
|
||||
"think-start-tag": "start_tag_thinking_sections",
|
||||
"think-end-tag": "end_tag_thinking_sections",
|
||||
"disable-responses-api": "disable_openai_responses_api",
|
||||
"transcribe-file": "audio_video_file_transcribe",
|
||||
"transcribe-model": "model_for_transcription",
|
||||
"split-media-file": "split_media_files_ffmpeg",
|
||||
"voice": "tts_voice_name",
|
||||
"list-gemini-voices": "list_gemini_tts_voices",
|
||||
"list-transcription-models": "list_transcription_models",
|
||||
"notification": "send_desktop_notification",
|
||||
"notification-command": "custom_notification_command",
|
||||
"thinking": "set_reasoning_thinking_level",
|
||||
"debug": "set_debug_level",
|
||||
}
|
||||
|
||||
// TranslatedHelpWriter provides custom help output with translated descriptions
|
||||
type TranslatedHelpWriter struct {
|
||||
parser *flags.Parser
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
// NewTranslatedHelpWriter creates a new help writer with translations
|
||||
func NewTranslatedHelpWriter(parser *flags.Parser, writer io.Writer) *TranslatedHelpWriter {
|
||||
return &TranslatedHelpWriter{
|
||||
parser: parser,
|
||||
writer: writer,
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHelp writes the help output with translated flag descriptions
|
||||
func (h *TranslatedHelpWriter) WriteHelp() {
|
||||
fmt.Fprintf(h.writer, "%s\n", i18n.T("usage_header"))
|
||||
fmt.Fprintf(h.writer, " %s %s\n\n", h.parser.Name, i18n.T("options_placeholder"))
|
||||
|
||||
fmt.Fprintf(h.writer, "%s\n", i18n.T("application_options_header"))
|
||||
h.writeAllFlags()
|
||||
|
||||
fmt.Fprintf(h.writer, "\n%s\n", i18n.T("help_options_header"))
|
||||
fmt.Fprintf(h.writer, " -h, --help %s\n", i18n.T("help_message"))
|
||||
}
|
||||
|
||||
// getTranslatedDescription gets the translated description for a flag
|
||||
func (h *TranslatedHelpWriter) getTranslatedDescription(flagName string) string {
|
||||
if i18nKey, exists := flagDescriptionMap[flagName]; exists {
|
||||
return i18n.T(i18nKey)
|
||||
}
|
||||
|
||||
// Fallback 1: Try to get original description from struct tag
|
||||
if desc := h.getOriginalDescription(flagName); desc != "" {
|
||||
		return desc
	}

	// Fallback 2: Provide a user-friendly default message
	return i18n.T("no_description_available")
}

// getOriginalDescription retrieves the original description from struct tags
func (h *TranslatedHelpWriter) getOriginalDescription(flagName string) string {
	flags := &Flags{}
	flagsType := reflect.TypeOf(flags).Elem()

	for i := 0; i < flagsType.NumField(); i++ {
		field := flagsType.Field(i)
		longTag := field.Tag.Get("long")

		if longTag == flagName {
			if description := field.Tag.Get("description"); description != "" {
				return description
			}
			break
		}
	}
	return ""
}

// CustomHelpHandler handles help output with translations
func CustomHelpHandler(parser *flags.Parser, writer io.Writer) {
	// Initialize i18n system with detected language if not already initialized
	ensureI18nInitialized()

	helpWriter := NewTranslatedHelpWriter(parser, writer)
	helpWriter.WriteHelp()
}

// ensureI18nInitialized initializes the i18n system if not already done
func ensureI18nInitialized() {
	// Try to detect language from command line args or environment
	lang := detectLanguageFromArgs()
	if lang == "" {
		// Try to detect from environment variables
		lang = detectLanguageFromEnv()
	}

	// Initialize i18n with detected language (or empty for system default)
	i18n.Init(lang)
}

// detectLanguageFromArgs looks for --language/-g flag in os.Args
func detectLanguageFromArgs() string {
	args := os.Args[1:]
	for i, arg := range args {
		if arg == "--language" || arg == "-g" {
			if i+1 < len(args) {
				return args[i+1]
			}
		} else if strings.HasPrefix(arg, "--language=") {
			return strings.TrimPrefix(arg, "--language=")
		} else if strings.HasPrefix(arg, "-g=") {
			return strings.TrimPrefix(arg, "-g=")
		}
	}
	return ""
}

// detectLanguageFromEnv detects language from environment variables
func detectLanguageFromEnv() string {
	// Check standard locale environment variables
	envVars := []string{"LC_ALL", "LC_MESSAGES", "LANG"}
	for _, envVar := range envVars {
		if value := os.Getenv(envVar); value != "" {
			// Extract language code from locale (e.g., "es_ES.UTF-8" -> "es")
			if strings.Contains(value, "_") {
				return strings.Split(value, "_")[0]
			}
			if value != "C" && value != "POSIX" {
				return value
			}
		}
	}
	return ""
}

// writeAllFlags writes all flags with translated descriptions
func (h *TranslatedHelpWriter) writeAllFlags() {
	// Use direct reflection on the Flags struct to get all flag definitions
	flags := &Flags{}
	flagsType := reflect.TypeOf(flags).Elem()

	for i := 0; i < flagsType.NumField(); i++ {
		field := flagsType.Field(i)

		shortTag := field.Tag.Get("short")
		longTag := field.Tag.Get("long")
		defaultTag := field.Tag.Get("default")

		if longTag == "" {
			continue // Skip fields without long tags
		}

		// Get translated description
		description := h.getTranslatedDescription(longTag)

		// Format the flag line
		var flagLine strings.Builder
		flagLine.WriteString(" ")

		if shortTag != "" {
			flagLine.WriteString(fmt.Sprintf("-%s, ", shortTag))
		}

		flagLine.WriteString(fmt.Sprintf("--%s", longTag))

		// Add parameter indicator for non-boolean flags
		isBoolFlag := field.Type.Kind() == reflect.Bool ||
			strings.HasSuffix(longTag, "patterns") ||
			strings.HasSuffix(longTag, "models") ||
			strings.HasSuffix(longTag, "contexts") ||
			strings.HasSuffix(longTag, "sessions") ||
			strings.HasSuffix(longTag, "extensions") ||
			strings.HasSuffix(longTag, "strategies") ||
			strings.HasSuffix(longTag, "vendors") ||
			strings.HasSuffix(longTag, "voices") ||
			longTag == "setup" || longTag == "stream" || longTag == "raw" ||
			longTag == "copy" || longTag == "updatepatterns" ||
			longTag == "output-session" || longTag == "changeDefaultModel" ||
			longTag == "playlist" || longTag == "transcript" ||
			longTag == "transcript-with-timestamps" || longTag == "comments" ||
			longTag == "metadata" || longTag == "readability" ||
			longTag == "input-has-vars" || longTag == "no-variable-replacement" ||
			longTag == "dry-run" || longTag == "serve" || longTag == "serveOllama" ||
			longTag == "version" || longTag == "shell-complete-list" ||
			longTag == "search" || longTag == "suppress-think" ||
			longTag == "disable-responses-api" || longTag == "split-media-file" ||
			longTag == "notification"

		if !isBoolFlag {
			flagLine.WriteString("=")
		}

		// Pad to align descriptions
		flagStr := flagLine.String()
		padding := 34 - len(flagStr)
		if padding < 2 {
			padding = 2
		}

		fmt.Fprintf(h.writer, "%s%s%s", flagStr, strings.Repeat(" ", padding), description)

		// Add default value if present
		if defaultTag != "" && defaultTag != "0" && defaultTag != "false" {
			fmt.Fprintf(h.writer, " (default: %s)", defaultTag)
		}

		fmt.Fprintf(h.writer, "\n")
	}
}
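For context, a minimal standalone sketch (not part of this diff; names are illustrative) of the resolution order the helpers above implement: an explicit --language/-g value wins, then the POSIX locale variables, then the English default.

package main

import (
	"fmt"
	"os"
	"strings"
)

// pickLanguage is a simplified stand-in for detectLanguageFromArgs/detectLanguageFromEnv.
func pickLanguage(args []string) string {
	for i, arg := range args {
		if arg == "--language" || arg == "-g" {
			if i+1 < len(args) {
				return args[i+1]
			}
		} else if strings.HasPrefix(arg, "--language=") {
			return strings.TrimPrefix(arg, "--language=")
		}
	}
	for _, v := range []string{"LC_ALL", "LC_MESSAGES", "LANG"} {
		if val := os.Getenv(v); val != "" && val != "C" && val != "POSIX" {
			return strings.Split(val, "_")[0] // "es_ES.UTF-8" -> "es"
		}
	}
	return "en"
}

func main() {
	os.Unsetenv("LC_ALL")
	os.Unsetenv("LC_MESSAGES")
	os.Setenv("LANG", "es_ES.UTF-8")
	fmt.Println(pickLanguage([]string{"--language=fr"})) // fr (flag wins)
	fmt.Println(pickLanguage(nil))                       // es (from LANG)
}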
@@ -6,6 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
)
|
||||
|
||||
@@ -36,20 +37,20 @@ func initializeFabric() (registry *core.PluginRegistry, err error) {
|
||||
func ensureEnvFile() (err error) {
|
||||
var homedir string
|
||||
if homedir, err = os.UserHomeDir(); err != nil {
|
||||
return fmt.Errorf("could not determine user home directory: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_determine_home_dir"), err))
|
||||
}
|
||||
configDir := filepath.Join(homedir, ".config", "fabric")
|
||||
envPath := filepath.Join(configDir, ".env")
|
||||
|
||||
if _, statErr := os.Stat(envPath); statErr != nil {
|
||||
if !os.IsNotExist(statErr) {
|
||||
return fmt.Errorf("could not stat .env file: %w", statErr)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_stat_env_file"), statErr))
|
||||
}
|
||||
if err = os.MkdirAll(configDir, ConfigDirPerms); err != nil {
|
||||
return fmt.Errorf("could not create config directory: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_create_config_dir"), err))
|
||||
}
|
||||
if err = os.WriteFile(envPath, []byte{}, EnvFilePerms); err != nil {
|
||||
return fmt.Errorf("could not create .env file: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_create_env_file"), err))
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
openai "github.com/openai/openai-go"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/gemini"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
@@ -93,7 +94,7 @@ func listTranscriptionModels(shellComplete bool) {
|
||||
fmt.Println(model)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Available transcription models:")
|
||||
fmt.Println(i18n.T("available_transcription_models"))
|
||||
for _, model := range models {
|
||||
fmt.Printf(" %s\n", model)
|
||||
}
|
||||
|
||||
@@ -7,30 +7,32 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/atotto/clipboard"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
)
|
||||
|
||||
func CopyToClipboard(message string) (err error) {
|
||||
if err = clipboard.WriteAll(message); err != nil {
|
||||
err = fmt.Errorf("could not copy to clipboard: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_copy_to_clipboard"), err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func CreateOutputFile(message string, fileName string) (err error) {
|
||||
if _, err = os.Stat(fileName); err == nil {
|
||||
err = fmt.Errorf("file %s already exists, not overwriting. Rename the existing file or choose a different name", fileName)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("file_already_exists_not_overwriting"), fileName))
|
||||
return
|
||||
}
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_creating_file"), err))
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
if _, err = file.WriteString(message); err != nil {
|
||||
err = fmt.Errorf("error writing to file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_writing_to_file"), err))
|
||||
} else {
|
||||
fmt.Fprintf(os.Stderr, "\n\n[Output also written to %s]\n", fileName)
|
||||
debuglog.Log("\n\n[Output also written to %s]\n", fileName)
|
||||
}
|
||||
return
|
||||
}
|
||||
@@ -45,13 +47,13 @@ func CreateAudioOutputFile(audioData []byte, fileName string) (err error) {
|
||||
// File existence check is now done in the CLI layer before TTS generation
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating audio file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_creating_audio_file"), err))
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err = file.Write(audioData); err != nil {
|
||||
err = fmt.Errorf("error writing audio data to file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_writing_audio_data"), err))
|
||||
}
|
||||
// No redundant output message here - the CLI layer handles success messaging
|
||||
return
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/tools/youtube"
|
||||
)
|
||||
|
||||
@@ -11,7 +12,7 @@ import (
|
||||
func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (messageTools string, err error) {
|
||||
if currentFlags.YouTube != "" {
|
||||
if !registry.YouTube.IsConfigured() {
|
||||
err = fmt.Errorf("YouTube is not configured, please run the setup procedure")
|
||||
err = fmt.Errorf("%s", i18n.T("youtube_not_configured"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -25,7 +26,7 @@ func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (m
|
||||
} else {
|
||||
var videos []*youtube.VideoMeta
|
||||
if videos, err = registry.YouTube.FetchPlaylistVideos(playlistId); err != nil {
|
||||
err = fmt.Errorf("error fetching playlist videos: %w", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_fetching_playlist_videos"), err))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -58,7 +59,7 @@ func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (m
|
||||
|
||||
if currentFlags.ScrapeURL != "" || currentFlags.ScrapeQuestion != "" {
|
||||
if !registry.Jina.IsConfigured() {
|
||||
err = fmt.Errorf("scraping functionality is not configured. Please set up Jina to enable scraping")
|
||||
err = fmt.Errorf("%s", i18n.T("scraping_not_configured"))
|
||||
return
|
||||
}
|
||||
// Check if the scrape_url flag is set and call ScrapeURL
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
)
|
||||
|
||||
type transcriber interface {
|
||||
@@ -18,15 +19,15 @@ func handleTranscription(flags *Flags, registry *core.PluginRegistry) (message s
|
||||
}
|
||||
vendor, ok := registry.VendorManager.VendorsByName[vendorName]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("vendor %s not configured", vendorName)
|
||||
return "", fmt.Errorf("%s", fmt.Sprintf(i18n.T("vendor_not_configured"), vendorName))
|
||||
}
|
||||
tr, ok := vendor.(transcriber)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("vendor %s does not support audio transcription", vendorName)
|
||||
return "", fmt.Errorf("%s", fmt.Sprintf(i18n.T("vendor_no_transcription_support"), vendorName))
|
||||
}
|
||||
model := flags.TranscribeModel
|
||||
if model == "" {
|
||||
return "", fmt.Errorf("transcription model is required (use --transcribe-model)")
|
||||
return "", fmt.Errorf("%s", i18n.T("transcription_model_required"))
|
||||
}
|
||||
if message, err = tr.TranscribeFile(context.Background(), flags.TranscribeFile, model, flags.SplitMediaFile); err != nil {
|
||||
return
|
||||
|
||||
@@ -10,6 +10,8 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/anthropic"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/azure"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/bedrock"
|
||||
@@ -20,7 +22,7 @@ import (
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/ollama"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/openai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/openai_compatible"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/perplexity" // Added Perplexity plugin
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/perplexity"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/strategy"
|
||||
|
||||
"github.com/samber/lo"
|
||||
@@ -130,7 +132,7 @@ func (o *PluginRegistry) ListVendors(out io.Writer) error {
|
||||
vendors := lo.Map(o.VendorsAll.Vendors, func(vendor ai.Vendor, _ int) string {
|
||||
return vendor.GetName()
|
||||
})
|
||||
fmt.Fprint(out, "Available Vendors:\n\n")
|
||||
fmt.Fprintf(out, "%s\n\n", i18n.T("available_vendors_header"))
|
||||
for _, vendor := range vendors {
|
||||
fmt.Fprintf(out, "%s\n", vendor)
|
||||
}
|
||||
@@ -339,7 +341,7 @@ func (o *PluginRegistry) GetChatter(model string, modelContextLength int, vendor
|
||||
} else {
|
||||
availableVendors := models.FindGroupsByItem(model)
|
||||
if len(availableVendors) > 1 {
|
||||
fmt.Fprintf(os.Stderr, "Warning: multiple vendors provide model %s: %s. Using %s. Specify --vendor to select a vendor.\n", model, strings.Join(availableVendors, ", "), availableVendors[0])
|
||||
debuglog.Log("Warning: multiple vendors provide model %s: %s. Using %s. Specify --vendor to select a vendor.\n", model, strings.Join(availableVendors, ", "), availableVendors[0])
|
||||
}
|
||||
ret.vendor = vendorManager.FindByName(models.FindGroupsByItemFirst(model))
|
||||
}
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
@@ -72,7 +73,12 @@ func TestGetChatter_WarnsOnAmbiguousModel(t *testing.T) {
|
||||
r, w, _ := os.Pipe()
|
||||
oldStderr := os.Stderr
|
||||
os.Stderr = w
|
||||
defer func() { os.Stderr = oldStderr }()
|
||||
// Redirect log output to our pipe to capture unconditional log messages
|
||||
debuglog.SetOutput(w)
|
||||
defer func() {
|
||||
os.Stderr = oldStderr
|
||||
debuglog.SetOutput(oldStderr)
|
||||
}()
|
||||
|
||||
chatter, err := registry.GetChatter("shared-model", 0, "", "", false, false)
|
||||
w.Close()
|
||||
|
||||
160	internal/i18n/i18n.go	Normal file
@@ -0,0 +1,160 @@
package i18n

import (
	"embed"
	"encoding/json"
	"fmt"
	"io"
	"net/http"
	"os"
	"path/filepath"
	"strings"
	"sync"

	"github.com/nicksnyder/go-i18n/v2/i18n"
	"golang.org/x/text/language"
)

// embedded default locales
//
//go:embed locales/*.json
var localeFS embed.FS

var (
	translator *i18n.Localizer
	initOnce   sync.Once
)

// Init initializes the i18n bundle and localizer. It loads the specified locale
// and falls back to English if loading fails.
// Translation files are searched in the user config directory and downloaded
// from GitHub if missing.
//
// If locale is empty, it will attempt to detect the system locale from
// environment variables (LC_ALL, LC_MESSAGES, LANG) following POSIX standards.
func Init(locale string) (*i18n.Localizer, error) {
	// Use preferred locale detection if no explicit locale provided
	locale = getPreferredLocale(locale)
	if locale == "" {
		locale = "en"
	}

	bundle := i18n.NewBundle(language.English)
	bundle.RegisterUnmarshalFunc("json", json.Unmarshal)

	// load embedded translations for the requested locale if available
	embedded := false
	if data, err := localeFS.ReadFile("locales/" + locale + ".json"); err == nil {
		_, _ = bundle.ParseMessageFileBytes(data, locale+".json")
		embedded = true
	} else if strings.Contains(locale, "-") {
		// Try base language if regional variant not found (e.g., es-ES -> es)
		baseLang := strings.Split(locale, "-")[0]
		if data, err := localeFS.ReadFile("locales/" + baseLang + ".json"); err == nil {
			_, _ = bundle.ParseMessageFileBytes(data, baseLang+".json")
			embedded = true
		}
	}
	if !embedded {
		if data, err := localeFS.ReadFile("locales/en.json"); err == nil {
			_, _ = bundle.ParseMessageFileBytes(data, "en.json")
		}
	}

	// load locale from disk or download when not embedded
	path := filepath.Join(userLocaleDir(), locale+".json")
	if _, err := os.Stat(path); os.IsNotExist(err) && !embedded {
		if err := downloadLocale(path, locale); err != nil {
			// if download fails, still continue with embedded translations
			fmt.Fprintf(os.Stderr, "%s\n", fmt.Sprintf(getErrorMessage("i18n_download_failed", "Failed to download translation for language '%s': %v"), locale, err))
		}
	}
	if _, err := os.Stat(path); err == nil {
		if _, err := bundle.LoadMessageFile(path); err != nil {
			fmt.Fprintf(os.Stderr, "%s\n", fmt.Sprintf(getErrorMessage("i18n_load_failed", "Failed to load translation file: %v"), err))
		}
	}

	translator = i18n.NewLocalizer(bundle, locale)
	return translator, nil
}

// T returns the localized string for the given message id.
// If the translator is not initialized, it will automatically initialize
// with system locale detection.
func T(messageID string) string {
	initOnce.Do(func() {
		if translator == nil {
			Init("") // Empty string triggers system locale detection
		}
	})
	return translator.MustLocalize(&i18n.LocalizeConfig{MessageID: messageID})
}

func userLocaleDir() string {
	dir, err := os.UserConfigDir()
	if err != nil {
		dir = "."
	}
	path := filepath.Join(dir, "fabric", "locales")
	os.MkdirAll(path, 0o755)
	return path
}

func downloadLocale(path, locale string) error {
	url := fmt.Sprintf("https://raw.githubusercontent.com/danielmiessler/Fabric/main/internal/i18n/locales/%s.json", locale)
	resp, err := http.Get(url)
	if err != nil {
		return err
	}
	defer resp.Body.Close()
	if resp.StatusCode != http.StatusOK {
		return fmt.Errorf("unexpected status: %s", resp.Status)
	}
	f, err := os.Create(path)
	if err != nil {
		return err
	}
	defer f.Close()
	_, err = io.Copy(f, resp.Body)
	return err
}

// getErrorMessage tries to get a translated error message, falling back to system locale
// and then to the provided fallback message. This is used during initialization when
// the translator may not be fully ready.
func getErrorMessage(messageID, fallback string) string {
	// Try to get system locale for error messages
	systemLocale := getPreferredLocale("")
	if systemLocale == "" {
		systemLocale = "en"
	}

	// First try the system locale
	if msg := tryGetMessage(systemLocale, messageID); msg != "" {
		return msg
	}

	// Fall back to English
	if systemLocale != "en" {
		if msg := tryGetMessage("en", messageID); msg != "" {
			return msg
		}
	}

	// Final fallback to hardcoded message
	return fallback
}

// tryGetMessage attempts to get a message from embedded locale files
func tryGetMessage(locale, messageID string) string {
	if data, err := localeFS.ReadFile("locales/" + locale + ".json"); err == nil {
		var messages map[string]string
		if json.Unmarshal(data, &messages) == nil {
			if msg, exists := messages[messageID]; exists {
				return msg
			}
		}
	}
	return ""
}
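A minimal usage sketch of the new package (assuming code inside this repository, since internal/ packages cannot be imported from outside it): initialize once with an explicit locale, or let T auto-detect on first use.

package main

import (
	"fmt"

	"github.com/danielmiessler/fabric/internal/i18n"
)

func main() {
	// Explicit locale; pass "" to fall back to LC_ALL/LC_MESSAGES/LANG detection.
	if _, err := i18n.Init("es"); err != nil {
		fmt.Println("i18n init:", err)
	}

	// T auto-initializes with system-locale detection if Init was never called.
	fmt.Println(i18n.T("available_transcription_models"))
}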
22	internal/i18n/i18n_test.go	Normal file
@@ -0,0 +1,22 @@
package i18n

import (
	"testing"

	gi18n "github.com/nicksnyder/go-i18n/v2/i18n"
)

func TestTranslation(t *testing.T) {
	loc, err := Init("es")
	if err != nil {
		t.Fatalf("init failed: %v", err)
	}
	msg, err := loc.Localize(&gi18n.LocalizeConfig{MessageID: "html_readability_error"})
	if err != nil {
		t.Fatalf("localize failed: %v", err)
	}
	expected := "usa la entrada original, porque no se puede aplicar la legibilidad de html"
	if msg != expected {
		t.Fatalf("unexpected translation: %s", msg)
	}
}
82	internal/i18n/locale.go	Normal file
@@ -0,0 +1,82 @@
package i18n

import (
	"os"
	"strings"

	"golang.org/x/text/language"
)

// detectSystemLocale detects the system locale using standard Unix environment variables.
// Follows the POSIX priority order for locale environment variables:
// 1. LC_ALL (highest priority - overrides all others)
// 2. LC_MESSAGES (for messages specifically)
// 3. LANG (general locale setting)
// 4. Returns empty string if none are set or valid
//
// This implementation follows POSIX standards and Unix best practices for locale detection.
func detectSystemLocale() string {
	// Check environment variables in priority order
	envVars := []string{"LC_ALL", "LC_MESSAGES", "LANG"}

	for _, envVar := range envVars {
		if value := os.Getenv(envVar); value != "" {
			locale := normalizeLocale(value)
			if locale != "" && isValidLocale(locale) {
				return locale
			}
		}
	}

	return ""
}

// normalizeLocale converts various locale formats to BCP 47 language tags.
// Examples:
// - "en_US.UTF-8" -> "en-US"
// - "fr_FR@euro" -> "fr-FR"
// - "zh_CN.GB2312" -> "zh-CN"
// - "C" or "POSIX" -> "" (invalid, falls back to default)
func normalizeLocale(locale string) string {
	// Handle special cases
	if locale == "C" || locale == "POSIX" || locale == "" {
		return ""
	}

	// Remove encoding and modifiers
	// Examples: en_US.UTF-8@euro -> en_US
	locale = strings.Split(locale, ".")[0] // Remove encoding (.UTF-8)
	locale = strings.Split(locale, "@")[0] // Remove modifiers (@euro)

	// Convert underscore to hyphen for BCP 47 compliance
	// en_US -> en-US
	locale = strings.ReplaceAll(locale, "_", "-")

	return locale
}

// isValidLocale checks if a locale string can be parsed as a valid language tag.
func isValidLocale(locale string) bool {
	if locale == "" {
		return false
	}

	// Use golang.org/x/text/language to validate
	_, err := language.Parse(locale)
	return err == nil
}

// getPreferredLocale returns the best locale to use based on user preferences.
// Priority order:
// 1. Explicit language flag (if provided)
// 2. System environment variables (LC_ALL, LC_MESSAGES, LANG)
// 3. Default fallback (empty string, which triggers "en" in Init)
func getPreferredLocale(explicitLang string) string {
	// If explicitly set via flag, use that
	if explicitLang != "" {
		return explicitLang
	}

	// Otherwise try to detect from system environment
	return detectSystemLocale()
}
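A small runnable sketch of the same normalization chain (illustrative names, mirroring the steps documented above): strip the encoding, strip the modifier, then swap underscores for hyphens.

package main

import (
	"fmt"
	"strings"
)

func normalize(locale string) string {
	if locale == "C" || locale == "POSIX" || locale == "" {
		return ""
	}
	locale = strings.Split(locale, ".")[0] // drop ".UTF-8"
	locale = strings.Split(locale, "@")[0] // drop "@euro"
	return strings.ReplaceAll(locale, "_", "-")
}

func main() {
	for _, l := range []string{"en_US.UTF-8", "de_DE.UTF-8@euro", "POSIX"} {
		fmt.Printf("%q -> %q\n", l, normalize(l)) // "en-US", "de-DE", ""
	}
}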
288	internal/i18n/locale_test.go	Normal file
@@ -0,0 +1,288 @@
|
||||
package i18n
|
||||
|
||||
import (
|
||||
"os"
|
||||
"testing"
|
||||
)
|
||||
|
||||
func TestDetectSystemLocale(t *testing.T) {
|
||||
// Save original environment
|
||||
originalLC_ALL := os.Getenv("LC_ALL")
|
||||
originalLC_MESSAGES := os.Getenv("LC_MESSAGES")
|
||||
originalLANG := os.Getenv("LANG")
|
||||
|
||||
// Clean up after test
|
||||
defer func() {
|
||||
os.Setenv("LC_ALL", originalLC_ALL)
|
||||
os.Setenv("LC_MESSAGES", originalLC_MESSAGES)
|
||||
os.Setenv("LANG", originalLANG)
|
||||
}()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
LC_ALL string
|
||||
LC_MESSAGES string
|
||||
LANG string
|
||||
expected string
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "LC_ALL takes highest priority",
|
||||
LC_ALL: "fr_FR.UTF-8",
|
||||
LC_MESSAGES: "de_DE.UTF-8",
|
||||
LANG: "es_ES.UTF-8",
|
||||
expected: "fr-FR",
|
||||
description: "LC_ALL should override all other variables",
|
||||
},
|
||||
{
|
||||
name: "LC_MESSAGES used when LC_ALL empty",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "ja_JP.UTF-8",
|
||||
LANG: "ko_KR.UTF-8",
|
||||
expected: "ja-JP",
|
||||
description: "LC_MESSAGES should be used when LC_ALL is not set",
|
||||
},
|
||||
{
|
||||
name: "LANG used when LC_ALL and LC_MESSAGES empty",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "",
|
||||
LANG: "zh_CN.GB2312",
|
||||
expected: "zh-CN",
|
||||
description: "LANG should be fallback when others are not set",
|
||||
},
|
||||
{
|
||||
name: "Empty when no valid locale set",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "",
|
||||
LANG: "",
|
||||
expected: "",
|
||||
description: "Should return empty when no environment variables set",
|
||||
},
|
||||
{
|
||||
name: "Handle C locale",
|
||||
LC_ALL: "C",
|
||||
LC_MESSAGES: "",
|
||||
LANG: "",
|
||||
expected: "",
|
||||
description: "C locale should be treated as invalid (fallback to default)",
|
||||
},
|
||||
{
|
||||
name: "Handle POSIX locale",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "POSIX",
|
||||
LANG: "",
|
||||
expected: "",
|
||||
description: "POSIX locale should be treated as invalid (fallback to default)",
|
||||
},
|
||||
{
|
||||
name: "Handle locale with modifiers",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "",
|
||||
LANG: "de_DE.UTF-8@euro",
|
||||
expected: "de-DE",
|
||||
description: "Should strip encoding and modifiers",
|
||||
},
|
||||
{
|
||||
name: "Skip invalid locale and use next priority",
|
||||
LC_ALL: "invalid_locale",
|
||||
LC_MESSAGES: "fr_CA.UTF-8",
|
||||
LANG: "en_US.UTF-8",
|
||||
expected: "fr-CA",
|
||||
description: "Should skip invalid high-priority locale and use next valid one",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Set test environment
|
||||
os.Setenv("LC_ALL", tt.LC_ALL)
|
||||
os.Setenv("LC_MESSAGES", tt.LC_MESSAGES)
|
||||
os.Setenv("LANG", tt.LANG)
|
||||
|
||||
result := detectSystemLocale()
|
||||
if result != tt.expected {
|
||||
t.Errorf("%s: expected %q, got %q", tt.description, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestNormalizeLocale(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected string
|
||||
}{
|
||||
// Standard Unix locale formats
|
||||
{"en_US.UTF-8", "en-US"},
|
||||
{"fr_FR.ISO8859-1", "fr-FR"},
|
||||
{"de_DE@euro", "de-DE"},
|
||||
{"zh_CN.GB2312", "zh-CN"},
|
||||
{"ja_JP.eucJP@traditional", "ja-JP"},
|
||||
|
||||
// Already normalized
|
||||
{"en-US", "en-US"},
|
||||
{"fr-CA", "fr-CA"},
|
||||
|
||||
// Language only
|
||||
{"en", "en"},
|
||||
{"fr", "fr"},
|
||||
{"zh", "zh"},
|
||||
|
||||
// Special cases
|
||||
{"C", ""},
|
||||
{"POSIX", ""},
|
||||
{"", ""},
|
||||
|
||||
// Complex cases
|
||||
{"pt_BR.UTF-8@currency=BRL", "pt-BR"},
|
||||
{"sr_RS.UTF-8@latin", "sr-RS"},
|
||||
{"uz_UZ.UTF-8@cyrillic", "uz-UZ"},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
result := normalizeLocale(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("normalizeLocale(%q): expected %q, got %q", tt.input, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIsValidLocale(t *testing.T) {
|
||||
tests := []struct {
|
||||
input string
|
||||
expected bool
|
||||
}{
|
||||
// Valid locales
|
||||
{"en", true},
|
||||
{"en-US", true},
|
||||
{"fr-FR", true},
|
||||
{"zh-CN", true},
|
||||
{"ja-JP", true},
|
||||
{"pt-BR", true},
|
||||
{"es-MX", true},
|
||||
|
||||
// Invalid locales
|
||||
{"", false},
|
||||
{"invalid", false},
|
||||
{"123", false}, // Numbers
|
||||
|
||||
// Note: golang.org/x/text/language is quite lenient and accepts:
|
||||
// - "en-ZZ" (unknown country codes are allowed)
|
||||
// - "en_US" (underscores are normalized to hyphens)
|
||||
// These are actually valid according to the language package
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.input, func(t *testing.T) {
|
||||
result := isValidLocale(tt.input)
|
||||
if result != tt.expected {
|
||||
t.Errorf("isValidLocale(%q): expected %v, got %v", tt.input, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestGetPreferredLocale(t *testing.T) {
|
||||
// Save original environment
|
||||
originalLC_ALL := os.Getenv("LC_ALL")
|
||||
originalLC_MESSAGES := os.Getenv("LC_MESSAGES")
|
||||
originalLANG := os.Getenv("LANG")
|
||||
|
||||
// Clean up after test
|
||||
defer func() {
|
||||
os.Setenv("LC_ALL", originalLC_ALL)
|
||||
os.Setenv("LC_MESSAGES", originalLC_MESSAGES)
|
||||
os.Setenv("LANG", originalLANG)
|
||||
}()
|
||||
|
||||
tests := []struct {
|
||||
name string
|
||||
explicitLang string
|
||||
LC_ALL string
|
||||
LC_MESSAGES string
|
||||
LANG string
|
||||
expected string
|
||||
description string
|
||||
}{
|
||||
{
|
||||
name: "Explicit language takes precedence",
|
||||
explicitLang: "es-ES",
|
||||
LC_ALL: "fr_FR.UTF-8",
|
||||
LC_MESSAGES: "de_DE.UTF-8",
|
||||
LANG: "ja_JP.UTF-8",
|
||||
expected: "es-ES",
|
||||
description: "Explicit language should override environment variables",
|
||||
},
|
||||
{
|
||||
name: "Use environment when no explicit language",
|
||||
explicitLang: "",
|
||||
LC_ALL: "it_IT.UTF-8",
|
||||
LC_MESSAGES: "ru_RU.UTF-8",
|
||||
LANG: "pl_PL.UTF-8",
|
||||
expected: "it-IT",
|
||||
description: "Should detect from environment when no explicit language",
|
||||
},
|
||||
{
|
||||
name: "Empty when no explicit and no environment",
|
||||
explicitLang: "",
|
||||
LC_ALL: "",
|
||||
LC_MESSAGES: "",
|
||||
LANG: "",
|
||||
expected: "",
|
||||
description: "Should return empty when nothing is set",
|
||||
},
|
||||
}
|
||||
|
||||
for _, tt := range tests {
|
||||
t.Run(tt.name, func(t *testing.T) {
|
||||
// Set test environment
|
||||
os.Setenv("LC_ALL", tt.LC_ALL)
|
||||
os.Setenv("LC_MESSAGES", tt.LC_MESSAGES)
|
||||
os.Setenv("LANG", tt.LANG)
|
||||
|
||||
result := getPreferredLocale(tt.explicitLang)
|
||||
if result != tt.expected {
|
||||
t.Errorf("%s: expected %q, got %q", tt.description, tt.expected, result)
|
||||
}
|
||||
})
|
||||
}
|
||||
}
|
||||
|
||||
func TestIntegrationWithInit(t *testing.T) {
|
||||
// Save original environment
|
||||
originalLC_ALL := os.Getenv("LC_ALL")
|
||||
originalLANG := os.Getenv("LANG")
|
||||
|
||||
// Clean up after test
|
||||
defer func() {
|
||||
os.Setenv("LC_ALL", originalLC_ALL)
|
||||
os.Setenv("LANG", originalLANG)
|
||||
translator = nil // Reset global state
|
||||
}()
|
||||
|
||||
// Test that Init uses environment variables when no explicit locale provided
|
||||
os.Setenv("LC_ALL", "es_ES.UTF-8")
|
||||
os.Setenv("LANG", "fr_FR.UTF-8")
|
||||
|
||||
localizer, err := Init("")
|
||||
if err != nil {
|
||||
t.Fatalf("Init failed: %v", err)
|
||||
}
|
||||
|
||||
if localizer == nil {
|
||||
t.Error("Expected non-nil localizer")
|
||||
}
|
||||
|
||||
// Reset translator to test T() function auto-initialization
|
||||
translator = nil
|
||||
os.Setenv("LC_ALL", "")
|
||||
os.Setenv("LANG", "es_ES.UTF-8")
|
||||
|
||||
// This should trigger auto-initialization with environment detection
|
||||
result := T("html_readability_error")
|
||||
if result == "" {
|
||||
t.Error("Expected non-empty translation result")
|
||||
}
|
||||
}
|
||||
136	internal/i18n/locales/en.json	Normal file
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"html_readability_error": "use original input, because can't apply html readability",
|
||||
"vendor_not_configured": "vendor %s not configured",
|
||||
"vendor_no_transcription_support": "vendor %s does not support audio transcription",
|
||||
"transcription_model_required": "transcription model is required (use --transcribe-model)",
|
||||
"youtube_not_configured": "YouTube is not configured, please run the setup procedure",
|
||||
"error_fetching_playlist_videos": "error fetching playlist videos: %w",
|
||||
"scraping_not_configured": "scraping functionality is not configured. Please set up Jina to enable scraping",
|
||||
"could_not_determine_home_dir": "could not determine user home directory: %w",
|
||||
"could_not_stat_env_file": "could not stat .env file: %w",
|
||||
"could_not_create_config_dir": "could not create config directory: %w",
|
||||
"could_not_create_env_file": "could not create .env file: %w",
|
||||
"could_not_copy_to_clipboard": "could not copy to clipboard: %v",
|
||||
"file_already_exists_not_overwriting": "file %s already exists, not overwriting. Rename the existing file or choose a different name",
|
||||
"error_creating_file": "error creating file: %v",
|
||||
"error_writing_to_file": "error writing to file: %v",
|
||||
"error_creating_audio_file": "error creating audio file: %v",
|
||||
"error_writing_audio_data": "error writing audio data to file: %v",
|
||||
"tts_model_requires_audio_output": "TTS model '%s' requires audio output. Please specify an audio output file with -o flag (e.g., -o output.wav)",
|
||||
"audio_output_file_specified_but_not_tts_model": "audio output file '%s' specified but model '%s' is not a TTS model. Please use a TTS model like gemini-2.5-flash-preview-tts",
|
||||
"file_already_exists_choose_different": "file %s already exists. Please choose a different filename or remove the existing file",
|
||||
"no_notification_system_available": "no notification system available",
|
||||
"cannot_convert_string": "cannot convert string %q to %v",
|
||||
"unsupported_conversion": "unsupported conversion from %v to %v",
|
||||
"invalid_config_path": "invalid config path: %w",
|
||||
"config_file_not_found": "config file not found: %s",
|
||||
"error_reading_config_file": "error reading config file: %w",
|
||||
"error_parsing_config_file": "error parsing config file: %w",
|
||||
"error_reading_piped_message": "error reading piped message from stdin: %w",
|
||||
"image_file_already_exists": "image file already exists: %s",
|
||||
"invalid_image_file_extension": "invalid image file extension '%s'. Supported formats: .png, .jpeg, .jpg, .webp",
|
||||
"image_parameters_require_image_file": "image parameters (--image-size, --image-quality, --image-background, --image-compression) can only be used with --image-file",
|
||||
"invalid_image_size": "invalid image size '%s'. Supported sizes: 1024x1024, 1536x1024, 1024x1536, auto",
|
||||
"invalid_image_quality": "invalid image quality '%s'. Supported qualities: low, medium, high, auto",
|
||||
"invalid_image_background": "invalid image background '%s'. Supported backgrounds: opaque, transparent",
|
||||
"image_compression_jpeg_webp_only": "image compression can only be used with JPEG and WebP formats, not %s",
|
||||
"image_compression_range_error": "image compression must be between 0 and 100, got %d",
|
||||
"transparent_background_png_webp_only": "transparent background can only be used with PNG and WebP formats, not %s",
|
||||
"available_transcription_models": "Available transcription models:",
|
||||
"tts_audio_generated_successfully": "TTS audio generated successfully and saved to: %s\n",
|
||||
"fabric_command_complete": "Fabric Command Complete",
|
||||
"fabric_command_complete_with_pattern": "Fabric: %s Complete",
|
||||
"command_completed_successfully": "Command completed successfully",
|
||||
"output_truncated": "Output: %s...",
|
||||
"output_full": "Output: %s",
|
||||
"choose_pattern_from_available": "Choose a pattern from the available patterns",
|
||||
"pattern_variables_help": "Values for pattern variables, e.g. -v=#role:expert -v=#points:30",
|
||||
"choose_context_from_available": "Choose a context from the available contexts",
|
||||
"choose_session_from_available": "Choose a session from the available sessions",
|
||||
"attachment_path_or_url_help": "Attachment path or URL (e.g. for OpenAI image recognition messages)",
|
||||
"run_setup_for_reconfigurable_parts": "Run setup for all reconfigurable parts of fabric",
|
||||
"set_temperature": "Set temperature",
|
||||
"set_top_p": "Set top P",
|
||||
"stream_help": "Stream",
|
||||
"set_presence_penalty": "Set presence penalty",
|
||||
"use_model_defaults_raw_help": "Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns.",
|
||||
"set_frequency_penalty": "Set frequency penalty",
|
||||
"list_all_patterns": "List all patterns",
|
||||
"list_all_available_models": "List all available models",
|
||||
"list_all_contexts": "List all contexts",
|
||||
"list_all_sessions": "List all sessions",
|
||||
"update_patterns": "Update patterns",
|
||||
"messages_to_send_to_chat": "Messages to send to chat",
|
||||
"copy_to_clipboard": "Copy to clipboard",
|
||||
"choose_model": "Choose model",
|
||||
"specify_vendor_for_model": "Specify vendor for the selected model (e.g., -V \"LM Studio\" -m openai/gpt-oss-20b)",
|
||||
"model_context_length_ollama": "Model context length (only affects ollama)",
|
||||
"output_to_file": "Output to file",
|
||||
"output_entire_session": "Output the entire session (also a temporary one) to the output file",
|
||||
"number_of_latest_patterns": "Number of latest patterns to list",
|
||||
"change_default_model": "Change default model",
|
||||
"youtube_url_help": "YouTube video or play list \"URL\" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file",
|
||||
"prefer_playlist_over_video": "Prefer playlist over video if both ids are present in the URL",
|
||||
"grab_transcript_from_youtube": "Grab transcript from YouTube video and send to chat (it is used per default).",
|
||||
"grab_transcript_with_timestamps": "Grab transcript from YouTube video with timestamps and send to chat",
|
||||
"grab_comments_from_youtube": "Grab comments from YouTube video and send to chat",
|
||||
"output_video_metadata": "Output video metadata",
|
||||
"additional_yt_dlp_args": "Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')",
|
||||
"specify_language_code": "Specify the Language Code for the chat, e.g. -g=en -g=zh",
|
||||
"scrape_website_url": "Scrape website URL to markdown using Jina AI",
|
||||
"search_question_jina": "Search question using Jina AI",
|
||||
"seed_for_lmm_generation": "Seed to be used for LMM generation",
|
||||
"wipe_context": "Wipe context",
|
||||
"wipe_session": "Wipe session",
|
||||
"print_context": "Print context",
|
||||
"print_session": "Print session",
|
||||
"convert_html_readability": "Convert HTML input into a clean, readable view",
|
||||
"apply_variables_to_input": "Apply variables to user input",
|
||||
"disable_pattern_variable_replacement": "Disable pattern variable replacement",
|
||||
"show_dry_run": "Show what would be sent to the model without actually sending it",
|
||||
"serve_fabric_rest_api": "Serve the Fabric Rest API",
|
||||
"serve_fabric_api_ollama_endpoints": "Serve the Fabric Rest API with ollama endpoints",
|
||||
"address_to_bind_rest_api": "The address to bind the REST API",
|
||||
"api_key_secure_server_routes": "API key used to secure server routes",
|
||||
"path_to_yaml_config": "Path to YAML config file",
|
||||
"print_current_version": "Print current version",
|
||||
"list_all_registered_extensions": "List all registered extensions",
|
||||
"register_new_extension": "Register a new extension from config file path",
|
||||
"remove_registered_extension": "Remove a registered extension by name",
|
||||
"choose_strategy_from_available": "Choose a strategy from the available strategies",
|
||||
"list_all_strategies": "List all strategies",
|
||||
"list_all_vendors": "List all vendors",
|
||||
"output_raw_list_shell_completion": "Output raw list without headers/formatting (for shell completion)",
|
||||
"enable_web_search_tool": "Enable web search tool for supported models (Anthropic, OpenAI, Gemini)",
|
||||
"set_location_web_search": "Set location for web search results (e.g., 'America/Los_Angeles')",
|
||||
"save_generated_image_to_file": "Save generated image to specified file path (e.g., 'output.png')",
|
||||
"image_dimensions_help": "Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)",
|
||||
"image_quality_help": "Image quality: low, medium, high, auto (default: auto)",
|
||||
"compression_level_jpeg_webp": "Compression level 0-100 for JPEG/WebP formats (default: not set)",
|
||||
"background_type_help": "Background type: opaque, transparent (default: opaque, only for PNG/WebP)",
|
||||
"suppress_thinking_tags": "Suppress text enclosed in thinking tags",
|
||||
"start_tag_thinking_sections": "Start tag for thinking sections",
|
||||
"end_tag_thinking_sections": "End tag for thinking sections",
|
||||
"disable_openai_responses_api": "Disable OpenAI Responses API (default: false)",
|
||||
"audio_video_file_transcribe": "Audio or video file to transcribe",
|
||||
"model_for_transcription": "Model to use for transcription (separate from chat model)",
|
||||
"split_media_files_ffmpeg": "Split audio/video files larger than 25MB using ffmpeg",
|
||||
"tts_voice_name": "TTS voice name for supported models (e.g., Kore, Charon, Puck)",
|
||||
"list_gemini_tts_voices": "List all available Gemini TTS voices",
|
||||
"list_transcription_models": "List all available transcription models",
|
||||
"send_desktop_notification": "Send desktop notification when command completes",
|
||||
"custom_notification_command": "Custom command to run for notifications (overrides built-in notifications)",
|
||||
"set_reasoning_thinking_level": "Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)",
|
||||
"set_debug_level": "Set debug level (0=off, 1=basic, 2=detailed, 3=trace)",
|
||||
"usage_header": "Usage:",
|
||||
"application_options_header": "Application Options:",
|
||||
"help_options_header": "Help Options:",
|
||||
"help_message": "Show this help message",
|
||||
"options_placeholder": "[OPTIONS]",
|
||||
"available_vendors_header": "Available Vendors:",
|
||||
"available_models_header": "Available models",
|
||||
"no_items_found": "No %s",
|
||||
"no_description_available": "No description available",
|
||||
"i18n_download_failed": "Failed to download translation for language '%s': %v",
|
||||
"i18n_load_failed": "Failed to load translation file: %v"
|
||||
}
|
||||
136	internal/i18n/locales/es.json	Normal file
@@ -0,0 +1,136 @@
|
||||
{
|
||||
"html_readability_error": "usa la entrada original, porque no se puede aplicar la legibilidad de html",
|
||||
"vendor_not_configured": "el proveedor %s no está configurado",
|
||||
"vendor_no_transcription_support": "el proveedor %s no admite transcripción de audio",
|
||||
"transcription_model_required": "se requiere un modelo de transcripción (usa --transcribe-model)",
|
||||
"youtube_not_configured": "YouTube no está configurado, por favor ejecuta el procedimiento de configuración",
|
||||
"error_fetching_playlist_videos": "error al obtener videos de la lista de reproducción: %w",
|
||||
"scraping_not_configured": "la funcionalidad de extracción no está configurada. Por favor configura Jina para habilitar la extracción",
|
||||
"could_not_determine_home_dir": "no se pudo determinar el directorio home del usuario: %w",
|
||||
"could_not_stat_env_file": "no se pudo verificar el archivo .env: %w",
|
||||
"could_not_create_config_dir": "no se pudo crear el directorio de configuración: %w",
|
||||
"could_not_create_env_file": "no se pudo crear el archivo .env: %w",
|
||||
"could_not_copy_to_clipboard": "no se pudo copiar al portapapeles: %v",
|
||||
"file_already_exists_not_overwriting": "el archivo %s ya existe, no se sobrescribirá. Renombra el archivo existente o elige un nombre diferente",
|
||||
"error_creating_file": "error al crear el archivo: %v",
|
||||
"error_writing_to_file": "error al escribir al archivo: %v",
|
||||
"error_creating_audio_file": "error al crear el archivo de audio: %v",
|
||||
"error_writing_audio_data": "error al escribir datos de audio al archivo: %v",
|
||||
"tts_model_requires_audio_output": "el modelo TTS '%s' requiere salida de audio. Por favor especifica un archivo de salida de audio con la bandera -o (ej., -o output.wav)",
|
||||
"audio_output_file_specified_but_not_tts_model": "se especificó el archivo de salida de audio '%s' pero el modelo '%s' no es un modelo TTS. Por favor usa un modelo TTS como gemini-2.5-flash-preview-tts",
|
||||
"file_already_exists_choose_different": "el archivo %s ya existe. Por favor elige un nombre diferente o elimina el archivo existente",
|
||||
"no_notification_system_available": "no hay sistema de notificaciones disponible",
|
||||
"cannot_convert_string": "no se puede convertir la cadena %q a %v",
|
||||
"unsupported_conversion": "conversión no soportada de %v a %v",
|
||||
"invalid_config_path": "ruta de configuración inválida: %w",
|
||||
"config_file_not_found": "archivo de configuración no encontrado: %s",
|
||||
"error_reading_config_file": "error al leer el archivo de configuración: %w",
|
||||
"error_parsing_config_file": "error al analizar el archivo de configuración: %w",
|
||||
"error_reading_piped_message": "error al leer mensaje desde stdin: %w",
|
||||
"image_file_already_exists": "el archivo de imagen ya existe: %s",
|
||||
"invalid_image_file_extension": "extensión de archivo de imagen inválida '%s'. Formatos soportados: .png, .jpeg, .jpg, .webp",
|
||||
"image_parameters_require_image_file": "los parámetros de imagen (--image-size, --image-quality, --image-background, --image-compression) solo pueden usarse con --image-file",
|
||||
"invalid_image_size": "tamaño de imagen inválido '%s'. Tamaños soportados: 1024x1024, 1536x1024, 1024x1536, auto",
|
||||
"invalid_image_quality": "calidad de imagen inválida '%s'. Calidades soportadas: low, medium, high, auto",
|
||||
"invalid_image_background": "fondo de imagen inválido '%s'. Fondos soportados: opaque, transparent",
|
||||
"image_compression_jpeg_webp_only": "la compresión de imagen solo puede usarse con formatos JPEG y WebP, no %s",
|
||||
"image_compression_range_error": "la compresión de imagen debe estar entre 0 y 100, se obtuvo %d",
|
||||
"transparent_background_png_webp_only": "el fondo transparente solo puede usarse con formatos PNG y WebP, no %s",
|
||||
"available_transcription_models": "Modelos de transcripción disponibles:",
|
||||
"tts_audio_generated_successfully": "Audio TTS generado exitosamente y guardado en: %s\n",
|
||||
"fabric_command_complete": "Comando Fabric Completado",
|
||||
"fabric_command_complete_with_pattern": "Fabric: %s Completado",
|
||||
"command_completed_successfully": "Comando completado exitosamente",
|
||||
"output_truncated": "Salida: %s...",
|
||||
"output_full": "Salida: %s",
|
||||
"choose_pattern_from_available": "Elige un patrón de los patrones disponibles",
|
||||
"pattern_variables_help": "Valores para variables de patrón, ej. -v=#role:expert -v=#points:30",
|
||||
"choose_context_from_available": "Elige un contexto de los contextos disponibles",
|
||||
"choose_session_from_available": "Elige una sesión de las sesiones disponibles",
|
||||
"attachment_path_or_url_help": "Ruta de adjunto o URL (ej. para mensajes de reconocimiento de imagen de OpenAI)",
|
||||
"run_setup_for_reconfigurable_parts": "Ejecutar configuración para todas las partes reconfigurables de fabric",
|
||||
"set_temperature": "Establecer temperatura",
|
||||
"set_top_p": "Establecer top P",
|
||||
"stream_help": "Transmitir",
|
||||
"set_presence_penalty": "Establecer penalización de presencia",
|
||||
"use_model_defaults_raw_help": "Usar los valores predeterminados del modelo sin enviar opciones de chat (como temperatura, etc.) y usar el rol de usuario en lugar del rol del sistema para patrones.",
|
||||
"set_frequency_penalty": "Establecer penalización de frecuencia",
|
||||
"list_all_patterns": "Listar todos los patrones",
|
||||
"list_all_available_models": "Listar todos los modelos disponibles",
|
||||
"list_all_contexts": "Listar todos los contextos",
|
||||
"list_all_sessions": "Listar todas las sesiones",
|
||||
"update_patterns": "Actualizar patrones",
|
||||
"messages_to_send_to_chat": "Mensajes para enviar al chat",
|
||||
"copy_to_clipboard": "Copiar al portapapeles",
|
||||
"choose_model": "Elegir modelo",
|
||||
"specify_vendor_for_model": "Especificar proveedor para el modelo seleccionado (ej., -V \"LM Studio\" -m openai/gpt-oss-20b)",
|
||||
"model_context_length_ollama": "Longitud de contexto del modelo (solo afecta a ollama)",
|
||||
"output_to_file": "Salida a archivo",
|
||||
"output_entire_session": "Salida de toda la sesión (también una temporal) al archivo de salida",
|
||||
"number_of_latest_patterns": "Número de patrones más recientes a listar",
|
||||
"change_default_model": "Cambiar modelo predeterminado",
|
||||
"youtube_url_help": "Video de YouTube o \"URL\" de lista de reproducción para obtener transcripción, comentarios y enviar al chat o imprimir en la consola y almacenar en el archivo de salida",
|
||||
"prefer_playlist_over_video": "Preferir lista de reproducción sobre video si ambos ids están presentes en la URL",
|
||||
"grab_transcript_from_youtube": "Obtener transcripción del video de YouTube y enviar al chat (se usa por defecto).",
|
||||
"grab_transcript_with_timestamps": "Obtener transcripción del video de YouTube con marcas de tiempo y enviar al chat",
|
||||
"grab_comments_from_youtube": "Obtener comentarios del video de YouTube y enviar al chat",
|
||||
"output_video_metadata": "Salida de metadatos del video",
|
||||
"additional_yt_dlp_args": "Argumentos adicionales para pasar a yt-dlp (ej. '--cookies-from-browser brave')",
|
||||
"specify_language_code": "Especificar el Código de Idioma para el chat, ej. -g=en -g=zh",
|
||||
"scrape_website_url": "Extraer URL del sitio web a markdown usando Jina AI",
|
||||
"search_question_jina": "Pregunta de búsqueda usando Jina AI",
|
||||
"seed_for_lmm_generation": "Semilla para ser usada en la generación LMM",
|
||||
"wipe_context": "Limpiar contexto",
|
||||
"wipe_session": "Limpiar sesión",
|
||||
"print_context": "Imprimir contexto",
|
||||
"print_session": "Imprimir sesión",
|
||||
"convert_html_readability": "Convertir entrada HTML en una vista limpia y legible",
|
||||
"apply_variables_to_input": "Aplicar variables a la entrada del usuario",
|
||||
"disable_pattern_variable_replacement": "Deshabilitar reemplazo de variables de patrón",
|
||||
"show_dry_run": "Mostrar lo que se enviaría al modelo sin enviarlo realmente",
|
||||
"serve_fabric_rest_api": "Servir la API REST de Fabric",
|
||||
"serve_fabric_api_ollama_endpoints": "Servir la API REST de Fabric con endpoints de ollama",
|
||||
"address_to_bind_rest_api": "La dirección para vincular la API REST",
|
||||
"api_key_secure_server_routes": "Clave API usada para asegurar rutas del servidor",
|
||||
"path_to_yaml_config": "Ruta al archivo de configuración YAML",
|
||||
"print_current_version": "Imprimir versión actual",
|
||||
"list_all_registered_extensions": "Listar todas las extensiones registradas",
|
||||
"register_new_extension": "Registrar una nueva extensión desde la ruta del archivo de configuración",
|
||||
"remove_registered_extension": "Eliminar una extensión registrada por nombre",
|
||||
"choose_strategy_from_available": "Elegir una estrategia de las estrategias disponibles",
|
||||
"list_all_strategies": "Listar todas las estrategias",
|
||||
"list_all_vendors": "Listar todos los proveedores",
|
||||
"output_raw_list_shell_completion": "Salida de lista sin procesar sin encabezados/formato (para completado de shell)",
|
||||
"enable_web_search_tool": "Habilitar herramienta de búsqueda web para modelos soportados (Anthropic, OpenAI, Gemini)",
|
||||
"set_location_web_search": "Establecer ubicación para resultados de búsqueda web (ej., 'America/Los_Angeles')",
|
||||
"save_generated_image_to_file": "Guardar imagen generada en la ruta de archivo especificada (ej., 'output.png')",
|
||||
"image_dimensions_help": "Dimensiones de imagen: 1024x1024, 1536x1024, 1024x1536, auto (predeterminado: auto)",
|
||||
"image_quality_help": "Calidad de imagen: low, medium, high, auto (predeterminado: auto)",
|
||||
"compression_level_jpeg_webp": "Nivel de compresión 0-100 para formatos JPEG/WebP (predeterminado: no establecido)",
|
||||
"background_type_help": "Tipo de fondo: opaque, transparent (predeterminado: opaque, solo para PNG/WebP)",
|
||||
"suppress_thinking_tags": "Suprimir texto encerrado en etiquetas de pensamiento",
|
||||
"start_tag_thinking_sections": "Etiqueta de inicio para secciones de pensamiento",
|
||||
"end_tag_thinking_sections": "Etiqueta de fin para secciones de pensamiento",
|
||||
"disable_openai_responses_api": "Deshabilitar API de Respuestas de OpenAI (predeterminado: false)",
|
||||
"audio_video_file_transcribe": "Archivo de audio o video para transcribir",
|
||||
"model_for_transcription": "Modelo para usar en transcripción (separado del modelo de chat)",
|
||||
"split_media_files_ffmpeg": "Dividir archivos de audio/video mayores a 25MB usando ffmpeg",
|
||||
"tts_voice_name": "Nombre de voz TTS para modelos soportados (ej., Kore, Charon, Puck)",
|
||||
"list_gemini_tts_voices": "Listar todas las voces TTS de Gemini disponibles",
|
||||
"list_transcription_models": "Listar todos los modelos de transcripción disponibles",
|
||||
"send_desktop_notification": "Enviar notificación de escritorio cuando se complete el comando",
|
||||
"custom_notification_command": "Comando personalizado para ejecutar notificaciones (anula las notificaciones integradas)",
|
||||
"set_reasoning_thinking_level": "Establecer nivel de razonamiento/pensamiento (ej., off, low, medium, high, o tokens numéricos para Anthropic o Google Gemini)",
|
||||
"set_debug_level": "Establecer nivel de depuración (0=apagado, 1=básico, 2=detallado, 3=rastreo)",
|
||||
"usage_header": "Uso:",
|
||||
"application_options_header": "Opciones de la Aplicación:",
|
||||
"help_options_header": "Opciones de Ayuda:",
|
||||
"help_message": "Mostrar este mensaje de ayuda",
|
||||
"options_placeholder": "[OPCIONES]",
|
||||
"available_vendors_header": "Proveedores Disponibles:",
|
||||
"available_models_header": "Modelos disponibles",
|
||||
"no_items_found": "No hay %s",
|
||||
"no_description_available": "No hay descripción disponible",
|
||||
"i18n_download_failed": "Error al descargar traducción para el idioma '%s': %v",
|
||||
"i18n_load_failed": "Error al cargar archivo de traducción: %v"
|
||||
}
|
||||
@@ -61,6 +61,15 @@ func Debug(l Level, format string, a ...interface{}) {
 	}
 }
 
+// Log writes a message unconditionally to stderr.
+// This is for important messages that should always be shown regardless of debug level.
+func Log(format string, a ...interface{}) {
+	mu.RLock()
+	w := output
+	mu.RUnlock()
+	fmt.Fprintf(w, format, a...)
+}
+
 // SetOutput allows overriding the output destination for debug logs.
 func SetOutput(w io.Writer) {
 	mu.Lock()
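A short sketch of how the new Log/SetOutput pair is used (again only inside this repository because of the internal/ import path): Log always writes, and SetOutput lets tests capture that output, as the GetChatter test above does.

package main

import (
	"bytes"
	"fmt"

	debuglog "github.com/danielmiessler/fabric/internal/log"
)

func main() {
	var buf bytes.Buffer
	debuglog.SetOutput(&buf) // redirect, e.g. to assert on warnings in tests
	debuglog.Log("[Output also written to %s]\n", "summary.md")
	fmt.Print(buf.String())
}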
@@ -9,11 +9,11 @@ import (
"fmt"
"io"
"net/http"
"os"
"os/exec"
"strings"
"time"

debuglog "github.com/danielmiessler/fabric/internal/log"
"github.com/danielmiessler/fabric/internal/util"
"golang.org/x/oauth2"
)
@@ -77,7 +77,7 @@ func (t *OAuthTransport) getValidToken(tokenIdentifier string) (string, error) {
}
// If no token exists, run OAuth flow
if token == nil {
fmt.Fprintln(os.Stderr, "No OAuth token found, initiating authentication...")
debuglog.Log("No OAuth token found, initiating authentication...\n")
newAccessToken, err := RunOAuthFlow(tokenIdentifier)
if err != nil {
return "", fmt.Errorf("failed to authenticate: %w", err)
@@ -87,11 +87,11 @@ func (t *OAuthTransport) getValidToken(tokenIdentifier string) (string, error) {

// Check if token needs refresh (5 minute buffer)
if token.IsExpired(5) {
fmt.Fprintln(os.Stderr, "OAuth token expired, refreshing...")
debuglog.Log("OAuth token expired, refreshing...\n")
newAccessToken, err := RefreshToken(tokenIdentifier)
if err != nil {
// If refresh fails, try re-authentication
fmt.Fprintln(os.Stderr, "Token refresh failed, re-authenticating...")
debuglog.Log("Token refresh failed, re-authenticating...\n")
newAccessToken, err = RunOAuthFlow(tokenIdentifier)
if err != nil {
return "", fmt.Errorf("failed to refresh or re-authenticate: %w", err)
@@ -143,13 +143,13 @@ func RunOAuthFlow(tokenIdentifier string) (token string, err error) {
if err == nil && existingToken != nil {
// If token exists but is expired, try refreshing first
if existingToken.IsExpired(5) {
fmt.Fprintln(os.Stderr, "Found expired OAuth token, attempting refresh...")
debuglog.Log("Found expired OAuth token, attempting refresh...\n")
refreshedToken, refreshErr := RefreshToken(tokenIdentifier)
if refreshErr == nil {
fmt.Fprintln(os.Stderr, "Token refresh successful")
debuglog.Log("Token refresh successful\n")
return refreshedToken, nil
}
fmt.Fprintf(os.Stderr, "Token refresh failed (%v), proceeding with full OAuth flow...\n", refreshErr)
debuglog.Log("Token refresh failed (%v), proceeding with full OAuth flow...\n", refreshErr)
} else {
// Token exists and is still valid
return existingToken.AccessToken, nil
@@ -176,10 +176,10 @@ func RunOAuthFlow(tokenIdentifier string) (token string, err error) {
oauth2.SetAuthURLParam("state", verifier),
)

fmt.Fprintln(os.Stderr, "Open the following URL in your browser. Fabric would like to authorize:")
fmt.Fprintln(os.Stderr, authURL)
debuglog.Log("Open the following URL in your browser. Fabric would like to authorize:\n")
debuglog.Log("%s\n", authURL)
openBrowser(authURL)
fmt.Fprint(os.Stderr, "Paste the authorization code here: ")
debuglog.Log("Paste the authorization code here: ")
var code string
fmt.Scanln(&code)
parts := strings.SplitN(code, "#", 2)

@@ -5,11 +5,12 @@ import (
"sort"
"strings"

"github.com/danielmiessler/fabric/internal/i18n"
"github.com/danielmiessler/fabric/internal/util"
)

func NewVendorsModels() *VendorsModels {
return &VendorsModels{GroupsItemsSelectorString: util.NewGroupsItemsSelectorString("Available models")}
return &VendorsModels{GroupsItemsSelectorString: util.NewGroupsItemsSelectorString(i18n.T("available_models_header"))}
}

type VendorsModels struct {
@@ -21,7 +22,7 @@ type VendorsModels struct {
// Default vendor and model are highlighted with an asterisk.
func (o *VendorsModels) PrintWithVendor(shellCompleteList bool, defaultVendor, defaultModel string) {
if !shellCompleteList {
fmt.Printf("\n%v:\n", o.SelectionLabel)
fmt.Printf("%s:\n\n", o.SelectionLabel)
}

var currentItemIndex int

@@ -64,7 +64,7 @@ func (o *Client) TranscribeFile(ctx context.Context, filePath, model string, spl
if !split {
return "", fmt.Errorf("file %s exceeds 25MB limit; use --split-media-file to enable automatic splitting", filePath)
}
debuglog.Debug(debuglog.Basic, "File %s is larger than the size limit... breaking it up into chunks...\n", filePath)
debuglog.Log("File %s is larger than the size limit... breaking it up into chunks...\n", filePath)
if files, cleanup, err = splitAudioFile(filePath, ext, MaxAudioFileSize); err != nil {
return "", err
}
@@ -75,7 +75,7 @@ func (o *Client) TranscribeFile(ctx context.Context, filePath, model string, spl

var builder strings.Builder
for i, f := range files {
debuglog.Debug(debuglog.Basic, "Using model %s to transcribe part %d (file name: %s)...\n", model, i+1, f)
debuglog.Log("Using model %s to transcribe part %d (file name: %s)...\n", model, i+1, f)
var chunk *os.File
if chunk, err = os.Open(f); err != nil {
return "", err
@@ -115,7 +115,7 @@ func splitAudioFile(src, ext string, maxSize int64) (files []string, cleanup fun
segmentTime := 600 // start with 10 minutes
for {
pattern := filepath.Join(dir, "chunk-%03d"+ext)
debuglog.Debug(debuglog.Basic, "Running ffmpeg to split audio into %d-second chunks...\n", segmentTime)
debuglog.Log("Running ffmpeg to split audio into %d-second chunks...\n", segmentTime)
cmd := exec.Command("ffmpeg", "-y", "-i", src, "-f", "segment", "-segment_time", fmt.Sprintf("%d", segmentTime), "-c", "copy", pattern)
var stderr bytes.Buffer
cmd.Stderr = &stderr

@@ -4,9 +4,10 @@ import (
"context"
"fmt"
"os"
"sync" // Added sync package
"sync"

"github.com/danielmiessler/fabric/internal/domain"
debuglog "github.com/danielmiessler/fabric/internal/log"
"github.com/danielmiessler/fabric/internal/plugins"
perplexity "github.com/sgaunet/perplexity-go/v2"

@@ -171,7 +172,7 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
if err != nil {
// Log error, can't send to string channel directly.
// Consider a mechanism to propagate this error if needed.
fmt.Fprintf(os.Stderr, "perplexity streaming error: %v\\n", err) // Corrected capitalization
debuglog.Log("perplexity streaming error: %v\n", err)
// If the error occurs during stream setup, the channel might not have been closed by the receiver loop.
// However, closing it here might cause a panic if the receiver loop also tries to close it.
// close(channel) // Caution: Uncommenting this may cause panic, as channel is closed in the receiver goroutine.

@@ -7,6 +7,7 @@ import (
"path/filepath"
"strings"

"github.com/danielmiessler/fabric/internal/i18n"
"github.com/danielmiessler/fabric/internal/util"
)

@@ -108,7 +109,7 @@ func (o *StorageEntity) ListNames(shellCompleteList bool) (err error) {

if len(names) == 0 {
if !shellCompleteList {
fmt.Printf("\nNo %v\n", o.Label)
fmt.Printf("%s\n", fmt.Sprintf(i18n.T("no_items_found"), o.Label))
}
return
}

@@ -88,7 +88,7 @@ func NewExtensionRegistry(configDir string) *ExtensionRegistry {
r.ensureConfigDir()

if err := r.loadRegistry(); err != nil {
debuglog.Debug(debuglog.Basic, "Warning: could not load extension registry: %v\n", err)
debuglog.Log("Warning: could not load extension registry: %v\n", err)
}

return r

@@ -7,6 +7,7 @@ import (
"sort"
"strings"

debuglog "github.com/danielmiessler/fabric/internal/log"
"github.com/danielmiessler/fabric/internal/plugins"
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
"github.com/danielmiessler/fabric/internal/tools/githelper"
@@ -335,9 +336,9 @@ func (o *PatternsLoader) createUniquePatternsFile() (err error) {
patternNamesMap[entry.Name()] = true
}
}
fmt.Fprintf(os.Stderr, "📂 Also included patterns from custom directory: %s\n", o.Patterns.CustomPatternsDir)
debuglog.Log("📂 Also included patterns from custom directory: %s\n", o.Patterns.CustomPatternsDir)
} else {
fmt.Fprintf(os.Stderr, "Warning: Could not read custom patterns directory %s: %v\n", o.Patterns.CustomPatternsDir, customErr)
debuglog.Log("Warning: Could not read custom patterns directory %s: %v\n", o.Patterns.CustomPatternsDir, customErr)
}
}

@@ -223,9 +223,12 @@ schema = 3
[mod."github.com/modern-go/reflect2"]
version = "v1.0.2"
hash = "sha256-+W9EIW7okXIXjWEgOaMh58eLvBZ7OshW2EhaIpNLSBU="
[mod."github.com/nicksnyder/go-i18n/v2"]
version = "v2.6.0"
hash = "sha256-UrSECFbpCIg5avJ+f3LkJy/ncZFHa4q8sDqDIQ3YZJM="
[mod."github.com/ollama/ollama"]
version = "v0.9.0"
hash = "sha256-r2eU+kMG3tuJy2B43RXsfmeltzM9t05NEmNiJAW5qr4="
version = "v0.11.7"
hash = "sha256-3Wn1JWmil0aQQ2I/r398HbnUsi8ADoroqNyPziuxn/c="
[mod."github.com/openai/openai-go"]
version = "v1.8.2"
hash = "sha256-O8aV3zEj6o8kIlzlkYaTW4RzvwR3qNUBYiN8SuTM1R0="

@@ -1 +1 @@
"1.4.295"
"1.4.309"

@@ -1,116 +0,0 @@
# Docker Test Environment for API Configuration Fix

This directory contains a Docker-based testing setup for fixing the issue where Fabric calls Ollama and Bedrock APIs even when not configured. This addresses the problem where unconfigured services show error messages during model listing.

## Quick Start

```bash
# Run all tests
./scripts/docker-test/test-runner.sh

# Interactive mode - pick which test to run
./scripts/docker-test/test-runner.sh -i

# Run specific test case
./scripts/docker-test/test-runner.sh gemini-only

# Shell into test environment
./scripts/docker-test/test-runner.sh -s gemini-only

# Build image only (for development)
./scripts/docker-test/test-runner.sh -b

# Show help
./scripts/docker-test/test-runner.sh -h
```

## Test Cases

1. **no-config**: No APIs configured
2. **gemini-only**: Only Gemini configured (reproduces original issue #1195)
3. **openai-only**: Only OpenAI configured
4. **ollama-only**: Only Ollama configured
5. **bedrock-only**: Only Bedrock configured
6. **mixed**: Multiple APIs configured (Gemini + OpenAI + Ollama)

## Environment Files

Each test case has a corresponding environment file in `scripts/docker-test/env/`:

- `env.no-config` - Empty configuration
- `env.gemini-only` - Only Gemini API key
- `env.openai-only` - Only OpenAI API key
- `env.ollama-only` - Only Ollama URL
- `env.bedrock-only` - Only Bedrock configuration
- `env.mixed` - Multiple API configurations

These files are volume-mounted into the Docker container and persist changes made with `fabric -S`.
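Concretely, a single case boils down to roughly the two commands below. This is only a sketch of what `test-runner.sh` (reproduced later in this diff) does; the image name, mount path, and the `env.gemini-only` file are taken from that script and this directory layout.

```bash
# Build the shared test image once, then run one case against its env file.
docker build -f scripts/docker-test/base/Dockerfile -t fabric-test-setup .
docker run --rm \
  -e HOME=/home/testuser \
  -e USER=testuser \
  -v "$(pwd)/scripts/docker-test/env/env.gemini-only:/home/testuser/.config/fabric/.env" \
  fabric-test-setup --listmodels
```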

## Interactive Mode & Shell Access

The interactive mode (`-i`) provides several options:

```text
Available test cases:

1) No APIs configured (no-config)
2) Only Gemini configured (gemini-only)
3) Only OpenAI configured (openai-only)
4) Only Ollama configured (ollama-only)
5) Only Bedrock configured (bedrock-only)
6) Mixed configuration (mixed)
7) Run all tests
0) Exit

Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)
```

### Shell Mode

- Use `1!`, `2!`, etc. to shell into any test environment
- Run `fabric -S` to configure APIs interactively
- Run `fabric --listmodels` or `fabric -L` to test model listing
- Changes persist in the environment files
- Type `exit` to return to test runner

## Expected Results

**Before Fix:**

- `no-config` and `gemini-only` tests show Ollama connection errors
- Tests show Bedrock authentication errors when BEDROCK_AWS_REGION not set
- Error: `Ollama Get "http://localhost:11434/api/tags": dial tcp...`
- Error: `Bedrock failed to list foundation models...`

**After Fix:**

- Clean output with no error messages for unconfigured services
- Only configured services appear in model listings
- Ollama only initialized when `OLLAMA_API_URL` is set
- Bedrock only initialized when `BEDROCK_AWS_REGION` is set

## Implementation Details

- **Volume-mounted configs**: Environment files are mounted to `/home/testuser/.config/fabric/.env`
- **Persistent state**: Configuration changes survive between test runs
- **Single Docker image**: Built once from `scripts/docker-test/base/Dockerfile`, reused for all tests
- **Isolated environments**: Each test uses its own environment file
- **Cross-platform**: Works on macOS, Linux, and Windows with Docker

## Development Workflow

1. Make code changes to fix API initialization logic
2. Run `./scripts/docker-test/test-runner.sh no-config` to test the main issue
3. Use `./scripts/docker-test/test-runner.sh -i` for interactive testing
4. Shell into environments (`1!`, `2!`, etc.) to debug specific configurations
5. Run all tests before submitting PR: `./scripts/docker-test/test-runner.sh`

## Architecture

The fix involves:

1. **Ollama**: Override `IsConfigured()` method to check for `OLLAMA_API_URL` env var (sketched below)
2. **Bedrock**: Modify `hasAWSCredentials()` to require `BEDROCK_AWS_REGION`
3. **Plugin Registry**: Only initialize providers when properly configured

This prevents unnecessary API calls and eliminates confusing error messages for users.
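As a rough illustration of item 1 above, the override amounts to gating the provider on its environment variable. This is a minimal sketch only; the receiver type and surrounding plumbing in Fabric's actual ollama plugin differ.

```go
package ollama

import "os"

// Client is a stand-in for Fabric's Ollama plugin type; the real struct
// carries more configuration than shown here.
type Client struct{}

// IsConfigured reports the provider as configured only when the user has
// explicitly pointed Fabric at an Ollama server, so unconfigured setups
// never trigger the default localhost probe described above.
func (c *Client) IsConfigured() bool {
	return os.Getenv("OLLAMA_API_URL") != ""
}
```
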
@@ -1,30 +0,0 @@
FROM golang:1.24-alpine AS builder

WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download

COPY ./cmd/fabric ./cmd/fabric
COPY ./internal ./internal
RUN go build -o fabric ./cmd/fabric

FROM alpine:latest
RUN apk --no-cache add ca-certificates

# Create a test user
RUN adduser -D -s /bin/sh testuser

# Switch to test user
USER testuser
WORKDIR /home/testuser

# Set environment variables for the test user
ENV HOME=/home/testuser
ENV USER=testuser

COPY --from=builder /app/fabric .

# Create fabric config directory and empty .env file
RUN mkdir -p .config/fabric && touch .config/fabric/.env

ENTRYPOINT ["./fabric"]

@@ -1,235 +0,0 @@
|
||||
#!/usr/bin/env bash
|
||||
|
||||
set -e
|
||||
|
||||
# Get the directory where this script is located
|
||||
top_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
|
||||
base_name="$(basename "$top_dir")"
|
||||
cd "$top_dir"/../.. || exit 1
|
||||
|
||||
# Check if bash version supports associative arrays
|
||||
if [[ ${BASH_VERSION%%.*} -lt 4 ]]; then
|
||||
echo "This script requires bash 4.0 or later for associative arrays."
|
||||
echo "Current version: $BASH_VERSION"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
IMAGE_NAME="fabric-test-setup"
|
||||
ENV_DIR="scripts/${base_name}/env"
|
||||
|
||||
# Test case descriptions
|
||||
declare -A test_descriptions=(
|
||||
["no-config"]="No APIs configured"
|
||||
["gemini-only"]="Only Gemini configured (reproduces original issue)"
|
||||
["openai-only"]="Only OpenAI configured"
|
||||
["ollama-only"]="Only Ollama configured"
|
||||
["bedrock-only"]="Only Bedrock configured"
|
||||
["mixed"]="Mixed configuration (Gemini + OpenAI + Ollama)"
|
||||
)
|
||||
|
||||
# Test case order for consistent display
|
||||
test_order=("no-config" "gemini-only" "openai-only" "ollama-only" "bedrock-only" "mixed")
|
||||
|
||||
build_image() {
|
||||
echo "=== Building Docker image ==="
|
||||
docker build -f "${top_dir}/base/Dockerfile" -t "$IMAGE_NAME" .
|
||||
echo
|
||||
}
|
||||
|
||||
check_env_file() {
|
||||
local test_name="$1"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
if [[ ! -f "$env_file" ]]; then
|
||||
echo "Error: Environment file not found: $env_file"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
run_test() {
|
||||
local test_name="$1"
|
||||
local description="${test_descriptions[$test_name]}"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
check_env_file "$test_name"
|
||||
|
||||
echo "===================="
|
||||
echo "Test: $description"
|
||||
echo "Config: $test_name"
|
||||
echo "Env file: $env_file"
|
||||
echo "===================="
|
||||
|
||||
echo "Running test..."
|
||||
if docker run --rm \
|
||||
-e HOME=/home/testuser \
|
||||
-e USER=testuser \
|
||||
-v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env:ro" \
|
||||
"$IMAGE_NAME" --listmodels 2>&1; then
|
||||
echo "✅ Test completed"
|
||||
else
|
||||
echo "❌ Test failed"
|
||||
fi
|
||||
echo
|
||||
}
|
||||
|
||||
shell_into_env() {
|
||||
local test_name="$1"
|
||||
local description="${test_descriptions[$test_name]}"
|
||||
local env_file="$ENV_DIR/env.$test_name"
|
||||
|
||||
check_env_file "$test_name"
|
||||
|
||||
echo "===================="
|
||||
echo "Shelling into: $description"
|
||||
echo "Config: $test_name"
|
||||
echo "Env file: $env_file"
|
||||
echo "===================="
|
||||
echo "You can now run 'fabric -S' to configure, or 'fabric --listmodels' or 'fabric -L' to test."
|
||||
echo "Changes to .env will persist in $env_file"
|
||||
echo "Type 'exit' to return to the test runner."
|
||||
echo
|
||||
|
||||
docker run -it --rm \
|
||||
-e HOME=/home/testuser \
|
||||
-e USER=testuser \
|
||||
-v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env" \
|
||||
--entrypoint=/bin/sh \
|
||||
"$IMAGE_NAME"
|
||||
}
|
||||
|
||||
interactive_mode() {
|
||||
echo "=== Interactive Mode ==="
|
||||
echo "Available test cases:"
|
||||
echo
|
||||
local i=1
|
||||
local cases=()
|
||||
for test_name in "${test_order[@]}"; do
|
||||
echo "$i) ${test_descriptions[$test_name]} ($test_name)"
|
||||
cases[i]="$test_name"
|
||||
((i++))
|
||||
done
|
||||
echo "$i) Run all tests"
|
||||
echo "0) Exit"
|
||||
echo
|
||||
echo "Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)"
|
||||
echo
|
||||
|
||||
while true; do
|
||||
read -r -p "Select test case (0-$i) [or 1!, etc. to shell into test environment]: " choice
|
||||
|
||||
# Check for shell mode (! suffix)
|
||||
local shell_mode=false
|
||||
if [[ "$choice" == *"!" ]]; then
|
||||
shell_mode=true
|
||||
choice="${choice%!}" # Remove the ! suffix
|
||||
fi
|
||||
|
||||
if [[ "$choice" == "0" ]]; then
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Cannot shell into exit option."
|
||||
continue
|
||||
fi
|
||||
echo "Exiting..."
|
||||
exit 0
|
||||
elif [[ "$choice" == "$i" ]]; then
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Cannot shell into 'run all tests' option."
|
||||
continue
|
||||
fi
|
||||
echo "Running all tests..."
|
||||
run_all_tests
|
||||
break
|
||||
elif [[ "$choice" -ge 1 && "$choice" -lt "$i" ]]; then
|
||||
local selected_test="${cases[$choice]}"
|
||||
if [[ "$shell_mode" == true ]]; then
|
||||
echo "Shelling into: ${test_descriptions[$selected_test]}"
|
||||
shell_into_env "$selected_test"
|
||||
else
|
||||
echo "Running: ${test_descriptions[$selected_test]}"
|
||||
run_test "$selected_test"
|
||||
fi
|
||||
|
||||
read -r -p "Continue testing? (y/n): " again
|
||||
if [[ "$again" != "y" && "$again" != "Y" ]]; then
|
||||
break
|
||||
fi
|
||||
echo
|
||||
else
|
||||
echo "Invalid choice. Please select 0-$i (optionally with '!' for shell mode)."
|
||||
fi
|
||||
done
|
||||
}
|
||||
|
||||
run_all_tests() {
|
||||
echo "=== Testing PR #1645: Conditional API initialization ==="
|
||||
echo
|
||||
|
||||
for test_name in "${test_order[@]}"; do
|
||||
run_test "$test_name"
|
||||
done
|
||||
|
||||
echo "=== Test run complete ==="
|
||||
echo "Review the output above to check:"
|
||||
echo "1. No Ollama connection errors when OLLAMA_URL not set"
|
||||
echo "2. No Bedrock authentication errors when BEDROCK_AWS_REGION not set"
|
||||
echo "3. Only configured services appear in model listings"
|
||||
}
|
||||
|
||||
show_help() {
|
||||
echo "Usage: $0 [OPTIONS] [TEST_CASE]"
|
||||
echo
|
||||
echo "Test PR #1645 conditional API initialization"
|
||||
echo
|
||||
echo "Options:"
|
||||
echo " -h, --help Show this help message"
|
||||
echo " -i, --interactive Run in interactive mode"
|
||||
echo " -b, --build-only Build image only, don't run tests"
|
||||
echo " -s, --shell TEST Shell into test environment"
|
||||
echo
|
||||
echo "Test cases:"
|
||||
for test_name in "${test_order[@]}"; do
|
||||
echo " $test_name: ${test_descriptions[$test_name]}"
|
||||
done
|
||||
echo
|
||||
echo "Examples:"
|
||||
echo " $0 # Run all tests"
|
||||
echo " $0 -i # Interactive mode"
|
||||
echo " $0 gemini-only # Run specific test"
|
||||
echo " $0 -s gemini-only # Shell into gemini-only environment"
|
||||
echo " $0 -b # Build image only"
|
||||
echo
|
||||
echo "Environment files are located in $ENV_DIR/ and can be edited directly."
|
||||
}
|
||||
|
||||
# Parse command line arguments
|
||||
if [[ $# -eq 0 ]]; then
|
||||
build_image
|
||||
run_all_tests
|
||||
elif [[ "$1" == "-h" || "$1" == "--help" ]]; then
|
||||
show_help
|
||||
elif [[ "$1" == "-i" || "$1" == "--interactive" ]]; then
|
||||
build_image
|
||||
interactive_mode
|
||||
elif [[ "$1" == "-b" || "$1" == "--build-only" ]]; then
|
||||
build_image
|
||||
elif [[ "$1" == "-s" || "$1" == "--shell" ]]; then
|
||||
if [[ -z "$2" ]]; then
|
||||
echo "Error: -s/--shell requires a test case name"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
if [[ -z "${test_descriptions[$2]}" ]]; then
|
||||
echo "Error: Unknown test case: $2"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
build_image
|
||||
shell_into_env "$2"
|
||||
elif [[ -n "${test_descriptions[$1]}" ]]; then
|
||||
build_image
|
||||
run_test "$1"
|
||||
else
|
||||
echo "Unknown test case or option: $1"
|
||||
echo "Use -h for help."
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,41 +1,28 @@
# Use official golang image as builder
FROM golang:1.24.2-alpine AS builder
# syntax=docker/dockerfile:1

# Set working directory
WORKDIR /app
FROM golang:1.24-alpine AS builder

WORKDIR /src

# Install build dependencies
RUN apk add --no-cache git

# Copy go mod and sum files
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy source code
COPY . .

# Build the application
RUN CGO_ENABLED=0 GOOS=linux go build -o fabric ./cmd/fabric
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /fabric ./cmd/fabric

# Use scratch as final base image
FROM alpine:latest

# Copy the binary from builder
COPY --from=builder /app/fabric /fabric
LABEL org.opencontainers.image.description="A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details."

# Copy patterns directory
COPY patterns /patterns
RUN apk add --no-cache ca-certificates \
&& mkdir -p /root/.config/fabric

# Ensure clean config directory and copy ENV file
RUN rm -rf /root/.config/fabric && \
mkdir -p /root/.config/fabric
COPY ENV /root/.config/fabric/.env
COPY --from=builder /fabric /usr/local/bin/fabric

# Add debug commands
RUN ls -la /root/.config/fabric/

# Expose port 8080
EXPOSE 8080

# Run the binary with debug output
ENTRYPOINT ["/fabric"]
CMD ["--serve"]
ENTRYPOINT ["fabric"]

@@ -1,40 +1,60 @@
# Docker Deployment
# Fabric Docker Image

This directory contains Docker configuration files for running Fabric in containers.
This directory provides a simple Docker setup for running the [Fabric](https://github.com/danielmiessler/fabric) CLI.

## Files
## Build

- `Dockerfile` - Main Docker build configuration
- `docker-compose.yml` - Docker Compose stack configuration
- `start-docker.sh` - Helper script to start the stack
- `README.md` - This documentation

## Quick Start
Build the image from the repository root:

```bash
# Start the Docker stack
./start-docker.sh

# Or manually with docker-compose
docker-compose up -d

# View logs
docker-compose logs -f

# Stop the stack
docker-compose down
docker build -t fabric -f scripts/docker/Dockerfile .
```

## Building
## Persisting configuration

Fabric stores its configuration in `~/.config/fabric/.env`. Mount this path to keep your settings on the host.

### Using a host directory

```bash
# Build the Docker image
docker build -t fabric .

# Or use docker-compose
docker-compose build
mkdir -p $HOME/.fabric-config
# Run setup to create the .env and download patterns
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric fabric --setup
```

## Configuration
Subsequent runs can reuse the same directory:

Make sure to configure your environment variables and API keys before running the Docker stack. See the main README.md for setup instructions.
```bash
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric fabric -p your-pattern
```

### Mounting a single .env file

If you only want to persist the `.env` file:

```bash
# assuming .env exists in the current directory
docker run --rm -it -v $PWD/.env:/root/.config/fabric/.env fabric -p your-pattern
```

## Running the server

Expose port 8080 to use Fabric's REST API:

```bash
docker run --rm -it -p 8080:8080 -v $HOME/.fabric-config:/root/.config/fabric fabric --serve
```

The API will be available at `http://localhost:8080`.

## Multi-arch builds and GHCR packages

For multi-arch Docker builds (such as those used for GitHub Container Registry packages), the description should be set via annotations in the manifest instead of the Dockerfile LABEL. When building multi-arch images, ensure the build configuration includes:

```json
"annotations": {
"org.opencontainers.image.description": "A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details."
}
```

This ensures that GHCR packages display the proper description.

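For example, with a recent Docker Buildx (0.12 or later) the same annotation can be attached to the multi-arch manifest at build time. This is a sketch only; the platform list and the `ghcr.io/danielmiessler/fabric:latest` tag are illustrative, not taken from this repository's release configuration.

```bash
# Attach the OCI description to the image index so GHCR displays it.
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --annotation "index:org.opencontainers.image.description=A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details." \
  -f scripts/docker/Dockerfile \
  -t ghcr.io/danielmiessler/fabric:latest \
  --push .
```
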
@@ -1,11 +0,0 @@
version: '3.8'

services:
fabric-api:
build: .
ports:
- "8080:8080"
volumes:
- ./ENV:/root/.config/fabric/.env:ro
environment:
- GIN_MODE=release

@@ -1,11 +0,0 @@
#!/bin/bash

# Helper script to start the Fabric Docker stack

echo "Starting Fabric Docker stack..."
cd "$(dirname "$0")"
docker-compose up -d

echo "Fabric is now running!"
echo "Check logs with: docker-compose logs -f"
echo "Stop with: docker-compose down"

114 scripts/installer/README.md (new file)
@@ -0,0 +1,114 @@
# Fabric One-Line Installer

This directory contains the official one-line installer scripts for Fabric.

## Quick Start

### Unix/Linux/macOS

Install Fabric with a single command:

```bash
curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | bash
```

### Windows (PowerShell)

Install Fabric with a single PowerShell command:

```powershell
iwr -useb https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.ps1 | iex
```

## Custom Installation Directory

### Unix/Linux/macOS

By default, Fabric is installed to `~/.local/bin`. To install elsewhere:

```bash
curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | INSTALL_DIR=/usr/local/bin bash
```

For system-wide installation (requires sudo):

```bash
curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | sudo INSTALL_DIR=/usr/local/bin bash
```

### Windows (PowerShell)

By default, Fabric is installed to `%USERPROFILE%\.local\bin`. To install elsewhere:

```powershell
$env:INSTALL_DIR="C:\tools"; iwr -useb https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.ps1 | iex
```

## Supported Systems

- **Operating Systems**: Darwin (macOS), Linux, Windows
- **Architectures**: x86_64, arm64, i386 (Windows only)

## What It Does

1. **Detects** your OS and architecture automatically
2. **Downloads** the latest Fabric release from GitHub
3. **Extracts** only the `fabric` binary (not the full archive)
4. **Installs** to your chosen directory (default: `~/.local/bin`)
5. **Verifies** the installation works correctly
6. **Provides** PATH setup instructions if needed

## Features

- ✅ **Cross-platform** - Unix/Linux/macOS (bash) and Windows (PowerShell)
- ✅ **Zero dependencies** - No additional tools required
- ✅ **Automatic detection** - OS and architecture
- ✅ **Smart extraction** - Only the binary, not extra files
- ✅ **Error handling** - Clear messages and graceful failures
- ✅ **PATH guidance** - Helps you set up your environment
- ✅ **Verification** - Tests the installation before completing

## Requirements

### Unix/Linux/macOS

- `curl` or `wget` for downloading
- `tar` for extraction (standard on all Unix systems)
- Write permissions to the installation directory

### Windows

- PowerShell (built into Windows)
- Write permissions to the installation directory

## After Installation

1. **Configure Fabric**: Run `fabric --setup`
2. **Add API keys**: Follow the setup prompts
3. **Start using**: Try `fabric --help` or `fabric --listpatterns`

## Troubleshooting

**Permission denied?**

- Try with `sudo` for system directories
- Or choose a directory you can write to: `INSTALL_DIR=~/bin`

**Binary not found after install?**

- Add the install directory to your PATH
- The installer provides specific instructions for your shell

**Download fails?**

- Check your internet connection
- Verify GitHub is accessible from your network

## Alternative Installation Methods

If the one-liner doesn't work for you, see the main [Installation Guide](../../README.md#installation) for:

- Binary downloads
- Package managers (Homebrew, winget, AUR)
- Docker images
- Building from source

253 scripts/installer/install.ps1 (new file)
@@ -0,0 +1,253 @@
|
||||
# Fabric Windows Installer Script
|
||||
# Usage: iwr -useb https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.ps1 | iex
|
||||
# Usage with custom directory: $env:INSTALL_DIR="C:\tools"; iwr -useb https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.ps1 | iex
|
||||
|
||||
param(
|
||||
[string]$InstallDir = $env:INSTALL_DIR
|
||||
)
|
||||
|
||||
$ErrorActionPreference = "Stop"
|
||||
|
||||
# Colors for output (Windows Console colors)
|
||||
$Colors = @{
|
||||
Red = "Red"
|
||||
Green = "Green"
|
||||
Yellow = "Yellow"
|
||||
Blue = "Cyan"
|
||||
White = "White"
|
||||
}
|
||||
|
||||
# Print functions
|
||||
function Write-Info {
|
||||
param([string]$Message)
|
||||
Write-Host "[INFO] $Message" -ForegroundColor $Colors.Blue
|
||||
}
|
||||
|
||||
function Write-Success {
|
||||
param([string]$Message)
|
||||
Write-Host "[SUCCESS] $Message" -ForegroundColor $Colors.Green
|
||||
}
|
||||
|
||||
function Write-Warning {
|
||||
param([string]$Message)
|
||||
Write-Host "[WARNING] $Message" -ForegroundColor $Colors.Yellow
|
||||
}
|
||||
|
||||
function Write-Error {
|
||||
param([string]$Message)
|
||||
Write-Host "[ERROR] $Message" -ForegroundColor $Colors.Red
|
||||
}
|
||||
|
||||
# Detect Windows architecture
|
||||
function Get-Architecture {
|
||||
$arch = $env:PROCESSOR_ARCHITECTURE
|
||||
$archAMD64 = $env:PROCESSOR_ARCHITEW6432
|
||||
|
||||
# Check for ARM64
|
||||
if ($arch -eq "ARM64") {
|
||||
return "arm64"
|
||||
}
|
||||
|
||||
# Check for x86_64/AMD64
|
||||
if ($arch -eq "AMD64" -or $archAMD64 -eq "AMD64") {
|
||||
return "x86_64"
|
||||
}
|
||||
|
||||
# Check for x86 (32-bit)
|
||||
if ($arch -eq "X86") {
|
||||
return "i386"
|
||||
}
|
||||
|
||||
Write-Error "Unsupported architecture: $arch"
|
||||
Write-Error "This installer supports x86_64, i386, and arm64"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Test if running with appropriate permissions for directory
|
||||
function Test-WritePermission {
|
||||
param([string]$Path)
|
||||
|
||||
try {
|
||||
if (!(Test-Path $Path)) {
|
||||
New-Item -Path $Path -ItemType Directory -Force | Out-Null
|
||||
}
|
||||
|
||||
$testFile = Join-Path $Path "fabric_write_test.tmp"
|
||||
"test" | Out-File -FilePath $testFile -Force
|
||||
Remove-Item $testFile -Force
|
||||
return $true
|
||||
}
|
||||
catch {
|
||||
return $false
|
||||
}
|
||||
}
|
||||
|
||||
# Download and install Fabric
|
||||
function Install-Fabric {
|
||||
param(
|
||||
[string]$Architecture,
|
||||
[string]$InstallDirectory
|
||||
)
|
||||
|
||||
# Construct download URL
|
||||
$filename = "fabric_Windows_$Architecture.zip"
|
||||
$downloadUrl = "https://github.com/danielmiessler/fabric/releases/latest/download/$filename"
|
||||
|
||||
Write-Info "Downloading Fabric for Windows $Architecture..."
|
||||
Write-Info "URL: $downloadUrl"
|
||||
|
||||
# Create temporary directory
|
||||
$tempDir = Join-Path $env:TEMP "fabric_install_$(Get-Random)"
|
||||
New-Item -Path $tempDir -ItemType Directory -Force | Out-Null
|
||||
$tempFile = Join-Path $tempDir "fabric.zip"
|
||||
|
||||
try {
|
||||
# Download the archive
|
||||
Write-Info "Downloading archive..."
|
||||
Invoke-WebRequest -Uri $downloadUrl -OutFile $tempFile -UseBasicParsing
|
||||
|
||||
Write-Info "Extracting Fabric binary..."
|
||||
|
||||
# Extract the zip file
|
||||
Add-Type -AssemblyName System.IO.Compression.FileSystem
|
||||
$zip = [System.IO.Compression.ZipFile]::OpenRead($tempFile)
|
||||
|
||||
# Find and extract only fabric.exe
|
||||
$fabricEntry = $zip.Entries | Where-Object { $_.Name -eq "fabric.exe" }
|
||||
if (!$fabricEntry) {
|
||||
Write-Error "fabric.exe not found in the downloaded archive"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Create install directory if it doesn't exist
|
||||
if (!(Test-Path $InstallDirectory)) {
|
||||
Write-Info "Creating install directory: $InstallDirectory"
|
||||
New-Item -Path $InstallDirectory -ItemType Directory -Force | Out-Null
|
||||
}
|
||||
|
||||
# Extract fabric.exe to install directory
|
||||
$fabricPath = Join-Path $InstallDirectory "fabric.exe"
|
||||
Write-Info "Installing Fabric to $fabricPath..."
|
||||
|
||||
[System.IO.Compression.ZipFileExtensions]::ExtractToFile($fabricEntry, $fabricPath, $true)
|
||||
$zip.Dispose()
|
||||
|
||||
Write-Success "Fabric installed successfully to $fabricPath"
|
||||
return $fabricPath
|
||||
}
|
||||
catch {
|
||||
Write-Error "Failed to download or extract Fabric: $($_.Exception.Message)"
|
||||
exit 1
|
||||
}
|
||||
finally {
|
||||
# Clean up
|
||||
if (Test-Path $tempDir) {
|
||||
Remove-Item $tempDir -Recurse -Force -ErrorAction SilentlyContinue
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
# Check if directory is in PATH
|
||||
function Test-InPath {
|
||||
param([string]$Directory)
|
||||
|
||||
$pathDirs = $env:PATH -split ';'
|
||||
return $pathDirs -contains $Directory
|
||||
}
|
||||
|
||||
# Provide PATH setup instructions
|
||||
function Show-PathInstructions {
|
||||
param([string]$InstallDir)
|
||||
|
||||
if (Test-InPath $InstallDir) {
|
||||
Write-Success "✅ $InstallDir is already in your PATH"
|
||||
}
|
||||
else {
|
||||
Write-Warning "⚠️ $InstallDir is not in your PATH"
|
||||
Write-Info "To use fabric from anywhere, you have a few options:"
|
||||
Write-Info ""
|
||||
Write-Info "Option 1 - Add to PATH for current user (recommended):"
|
||||
Write-Info " `$currentPath = [Environment]::GetEnvironmentVariable('PATH', 'User')"
|
||||
Write-Info " [Environment]::SetEnvironmentVariable('PATH', `"`$currentPath;$InstallDir`", 'User')"
|
||||
Write-Info ""
|
||||
Write-Info "Option 2 - Add to PATH for all users (requires admin):"
|
||||
Write-Info " `$currentPath = [Environment]::GetEnvironmentVariable('PATH', 'Machine')"
|
||||
Write-Info " [Environment]::SetEnvironmentVariable('PATH', `"`$currentPath;$InstallDir`", 'Machine')"
|
||||
Write-Info ""
|
||||
Write-Info "Option 3 - Add to current session only:"
|
||||
Write-Info " `$env:PATH += `";$InstallDir`""
|
||||
Write-Info ""
|
||||
Write-Info "After updating PATH, restart your terminal or run: refreshenv"
|
||||
}
|
||||
}
|
||||
|
||||
# Verify installation
|
||||
function Test-Installation {
|
||||
param([string]$FabricPath)
|
||||
|
||||
if (Test-Path $FabricPath) {
|
||||
Write-Info "Verifying installation..."
|
||||
try {
|
||||
$version = & $FabricPath --version 2>$null
|
||||
if ($LASTEXITCODE -eq 0) {
|
||||
Write-Success "Fabric $version is working correctly!"
|
||||
}
|
||||
else {
|
||||
Write-Warning "Fabric binary exists but --version failed"
|
||||
}
|
||||
}
|
||||
catch {
|
||||
Write-Warning "Fabric binary exists but could not run --version"
|
||||
}
|
||||
}
|
||||
else {
|
||||
Write-Error "Fabric binary not found at $FabricPath"
|
||||
exit 1
|
||||
}
|
||||
}
|
||||
|
||||
# Main installation function
|
||||
function Main {
|
||||
Write-Info "🚀 Starting Fabric installation..."
|
||||
|
||||
# Detect architecture
|
||||
$arch = Get-Architecture
|
||||
Write-Info "Detected architecture: $arch"
|
||||
|
||||
# Determine install directory
|
||||
if (!$InstallDir) {
|
||||
$InstallDir = Join-Path $env:USERPROFILE ".local\bin"
|
||||
}
|
||||
|
||||
Write-Info "Install directory: $InstallDir"
|
||||
|
||||
# Check permissions
|
||||
if (!(Test-WritePermission $InstallDir)) {
|
||||
Write-Error "Cannot write to $InstallDir"
|
||||
Write-Error "Try running as Administrator or choose a different directory"
|
||||
Write-Info "Example with custom directory: `$env:INSTALL_DIR=`"C:\tools`"; iwr -useb ... | iex"
|
||||
exit 1
|
||||
}
|
||||
|
||||
# Install Fabric
|
||||
$fabricPath = Install-Fabric -Architecture $arch -InstallDirectory $InstallDir
|
||||
|
||||
# Verify installation
|
||||
Test-Installation -FabricPath $fabricPath
|
||||
|
||||
# Check PATH and provide instructions
|
||||
Show-PathInstructions -InstallDir $InstallDir
|
||||
|
||||
Write-Info ""
|
||||
Write-Success "🎉 Installation complete!"
|
||||
Write-Info ""
|
||||
Write-Info "Next steps:"
|
||||
Write-Info " 1. Run 'fabric --setup' to configure Fabric"
|
||||
Write-Info " 2. Add your API keys and preferences"
|
||||
Write-Info " 3. Start using Fabric with 'fabric --help'"
|
||||
Write-Info ""
|
||||
Write-Info "Documentation: https://github.com/danielmiessler/fabric"
|
||||
}
|
||||
|
||||
# Run main function
|
||||
Main
|
||||
219 scripts/installer/install.sh (new executable file)
@@ -0,0 +1,219 @@
|
||||
#!/bin/bash
|
||||
# Fabric Installer Script
|
||||
# Usage: curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | bash
|
||||
# Usage with custom directory: curl -fsSL https://raw.githubusercontent.com/danielmiessler/fabric/main/scripts/installer/install.sh | INSTALL_DIR=/usr/local/bin bash
|
||||
|
||||
set -e
|
||||
|
||||
# Colors for output
|
||||
RED='\033[0;31m'
|
||||
GREEN='\033[0;32m'
|
||||
YELLOW='\033[1;33m'
|
||||
BLUE='\033[0;34m'
|
||||
NC='\033[0m' # No Color
|
||||
|
||||
# Print functions
|
||||
print_info() {
|
||||
printf "${BLUE}[INFO]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_success() {
|
||||
printf "${GREEN}[SUCCESS]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_warning() {
|
||||
printf "${YELLOW}[WARNING]${NC} %s\n" "$1"
|
||||
}
|
||||
|
||||
print_error() {
|
||||
printf "${RED}[ERROR]${NC} %s\n" "$1" >&2
|
||||
}
|
||||
|
||||
# Detect OS
|
||||
detect_os() {
|
||||
case "$(uname -s)" in
|
||||
Darwin*)
|
||||
echo "Darwin"
|
||||
;;
|
||||
Linux*)
|
||||
echo "Linux"
|
||||
;;
|
||||
*)
|
||||
print_error "Unsupported operating system: $(uname -s)"
|
||||
print_error "This installer only supports Darwin (macOS) and Linux"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Detect architecture
|
||||
detect_arch() {
|
||||
case "$(uname -m)" in
|
||||
x86_64|amd64)
|
||||
echo "x86_64"
|
||||
;;
|
||||
arm64|aarch64)
|
||||
echo "arm64"
|
||||
;;
|
||||
*)
|
||||
print_error "Unsupported architecture: $(uname -m)"
|
||||
print_error "This installer only supports x86_64 and arm64"
|
||||
exit 1
|
||||
;;
|
||||
esac
|
||||
}
|
||||
|
||||
# Check if command exists
|
||||
command_exists() {
|
||||
command -v "$1" >/dev/null 2>&1
|
||||
}
|
||||
|
||||
# Download and extract fabric
|
||||
install_fabric() {
|
||||
local os="$1"
|
||||
local arch="$2"
|
||||
local install_dir="$3"
|
||||
|
||||
# Construct download URL
|
||||
local filename="fabric_${os}_${arch}.tar.gz"
|
||||
local download_url="https://github.com/danielmiessler/fabric/releases/latest/download/${filename}"
|
||||
|
||||
print_info "Downloading Fabric for ${os} ${arch}..."
|
||||
print_info "URL: ${download_url}"
|
||||
|
||||
# Create temporary directory
|
||||
local temp_dir
|
||||
temp_dir=$(mktemp -d)
|
||||
local temp_file="${temp_dir}/fabric.tar.gz"
|
||||
|
||||
# Download the archive
|
||||
if command_exists curl; then
|
||||
if ! curl -fsSL "${download_url}" -o "${temp_file}"; then
|
||||
print_error "Failed to download Fabric"
|
||||
rm -rf "${temp_dir}"
|
||||
exit 1
|
||||
fi
|
||||
elif command_exists wget; then
|
||||
if ! wget -q "${download_url}" -O "${temp_file}"; then
|
||||
print_error "Failed to download Fabric"
|
||||
rm -rf "${temp_dir}"
|
||||
exit 1
|
||||
fi
|
||||
else
|
||||
print_error "Neither curl nor wget found. Please install one of them and try again."
|
||||
exit 1
|
||||
fi
|
||||
|
||||
print_info "Extracting Fabric binary..."
|
||||
|
||||
# Extract only the fabric binary from the archive
|
||||
if ! tar -xzf "${temp_file}" -C "${temp_dir}" fabric; then
|
||||
print_error "Failed to extract Fabric binary"
|
||||
rm -rf "${temp_dir}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Create install directory if it doesn't exist
|
||||
if [ ! -d "${install_dir}" ]; then
|
||||
print_info "Creating install directory: ${install_dir}"
|
||||
if ! mkdir -p "${install_dir}"; then
|
||||
print_error "Failed to create install directory: ${install_dir}"
|
||||
print_error "You may need to run with sudo or choose a different directory"
|
||||
rm -rf "${temp_dir}"
|
||||
exit 1
|
||||
fi
|
||||
fi
|
||||
|
||||
# Move binary to install directory
|
||||
print_info "Installing Fabric to ${install_dir}/fabric..."
|
||||
if ! mv "${temp_dir}/fabric" "${install_dir}/fabric"; then
|
||||
print_error "Failed to install Fabric to ${install_dir}"
|
||||
print_error "You may need to run with sudo or choose a different directory"
|
||||
rm -rf "${temp_dir}"
|
||||
exit 1
|
||||
fi
|
||||
|
||||
# Make sure it's executable
|
||||
chmod +x "${install_dir}/fabric"
|
||||
|
||||
# Clean up
|
||||
rm -rf "${temp_dir}"
|
||||
|
||||
print_success "Fabric installed successfully to ${install_dir}/fabric"
|
||||
}
|
||||
|
||||
# Check PATH and provide instructions
|
||||
check_path() {
|
||||
local install_dir="$1"
|
||||
|
||||
if echo "$PATH" | grep -q "${install_dir}"; then
|
||||
print_success "✅ ${install_dir} is already in your PATH"
|
||||
else
|
||||
print_warning "⚠️ ${install_dir} is not in your PATH"
|
||||
print_info "To use fabric from anywhere, add the following to your shell profile:"
|
||||
print_info " export PATH=\"\$PATH:${install_dir}\""
|
||||
print_info ""
|
||||
print_info "For bash, add it to ~/.bashrc or ~/.bash_profile"
|
||||
print_info "For zsh, add it to ~/.zshrc"
|
||||
print_info "For fish, run: fish_add_path ${install_dir}"
|
||||
fi
|
||||
}
|
||||
|
||||
# Verify installation
|
||||
verify_installation() {
|
||||
local install_dir="$1"
|
||||
local fabric_path="${install_dir}/fabric"
|
||||
|
||||
if [ -x "${fabric_path}" ]; then
|
||||
print_info "Verifying installation..."
|
||||
local version
|
||||
if version=$("${fabric_path}" --version 2>/dev/null); then
|
||||
print_success "Fabric ${version} is working correctly!"
|
||||
else
|
||||
print_warning "Fabric binary exists but --version failed"
|
||||
fi
|
||||
else
|
||||
print_error "Fabric binary not found at ${fabric_path}"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Main installation function
|
||||
main() {
|
||||
print_info "🚀 Starting Fabric installation..."
|
||||
|
||||
# Detect system
|
||||
local os
|
||||
local arch
|
||||
os=$(detect_os)
|
||||
arch=$(detect_arch)
|
||||
|
||||
print_info "Detected system: ${os} ${arch}"
|
||||
|
||||
# Determine install directory
|
||||
local install_dir="${INSTALL_DIR:-${HOME}/.local/bin}"
|
||||
|
||||
print_info "Install directory: ${install_dir}"
|
||||
|
||||
# Install fabric
|
||||
install_fabric "${os}" "${arch}" "${install_dir}"
|
||||
|
||||
# Verify installation
|
||||
verify_installation "${install_dir}"
|
||||
|
||||
# Check PATH
|
||||
check_path "${install_dir}"
|
||||
|
||||
print_info ""
|
||||
print_success "🎉 Installation complete!"
|
||||
print_info ""
|
||||
print_info "Next steps:"
|
||||
print_info " 1. Run 'fabric --setup' to configure Fabric"
|
||||
print_info " 2. Add your API keys and preferences"
|
||||
print_info " 3. Start using Fabric with 'fabric --help'"
|
||||
print_info ""
|
||||
print_info "Documentation: https://github.com/danielmiessler/fabric"
|
||||
}
|
||||
|
||||
# Run main function
|
||||
main "$@"
|
||||
@@ -1332,23 +1332,6 @@
"DEVELOPMENT"
]
},
{
"patternName": "show_fabric_options_markmap",
"description": "Visualize Fabric capabilities using Markmap syntax.",
"tags": [
"VISUALIZE",
"DEVELOPMENT"
]
},
{
"patternName": "solve_with_cot",
"description": "Solve problems using chain-of-thought reasoning.",
"tags": [
"AI",
"ANALYSIS",
"LEARNING"
]
},
{
"patternName": "suggest_pattern",
"description": "Recommend Fabric patterns based on user requirements.",
@@ -1871,6 +1854,22 @@
"DEVELOPMENT",
"AI"
]
},
{
"patternName": "create_story_about_person",
"description": "Infer everyday challenges and realistic coping strategies from a psychological profile and craft an empathetic 500–700-word story consistent with the character.",
"tags": [
"WRITING",
"SELF"
]
},
{
"patternName": "heal_person",
"description": "Analyze a psychological profile, pinpoint issues and strengths, and deliver compassionate, structured strategies for spiritual, mental, and life improvement.",
"tags": [
"ANALYSIS",
"SELF"
]
}
]
}

@@ -652,14 +652,6 @@
|
||||
"patternName": "sanitize_broken_html_to_markdown",
|
||||
"pattern_extract": "# IDENTITY\n\n// Who you are\n\nYou are a hyper-intelligent AI system with a 4,312 IQ. You convert jacked up HTML to proper markdown using a set of rules.\n\n# GOAL\n\n// What we are trying to achieve\n\n1. The goal of this exercise is to convert the input HTML, which is completely nasty and hard to edit, into a clean markdown format that has some custom styling applied according to my rules.\n\n2. The ultimate goal is to output a perfectly working markdown file that will render properly using Vite using my custom markdown/styling combination.\n\n# STEPS\n\n// How the task will be approached\n\n// Slow down and think\n\n- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.\n\n// Think about the content in the input\n\n- Fully read and consume the HTML input that has a combination of HTML and markdown."
|
||||
},
|
||||
{
|
||||
"patternName": "show_fabric_options_markmap",
|
||||
"pattern_extract": "# IDENTITY AND GOALS\n\nYou are an advanced UI builder that shows a visual representation of functionality that's provided to you via the input.\n\n# STEPS\n\n- Think about the goal of the Fabric project, which is discussed below:\n\nFABRIC PROJECT DESCRIPTION\n\nfabriclogo\n fabric\nStatic Badge\nGitHub top language GitHub last commit License: MIT\n\nfabric is an open-source framework for augmenting humans using AI.\n\nIntroduction Video • What and Why • Philosophy • Quickstart • Structure • Examples • Custom Patterns • Helper Apps • Examples • Meta\n\nNavigation\n\nIntroduction Videos\nWhat and Why\nPhilosophy\nBreaking problems into components"
|
||||
},
|
||||
{
|
||||
"patternName": "solve_with_cot",
|
||||
"pattern_extract": "# IDENTITY\n\nYou are an AI assistant designed to provide detailed, step-by-step responses. Your outputs should follow this structure:\n\n# STEPS\n\n1. Begin with a <thinking> section.\n\n2. Inside the thinking section:\n\n- a. Briefly analyze the question and outline your approach.\n\n- b. Present a clear plan of steps to solve the problem.\n\n- c. Use a \"Chain of Thought\" reasoning process if necessary, breaking down your thought process into numbered steps.\n\n3. Include a <reflection> section for each idea where you:\n\n- a. Review your reasoning.\n\n- b. Check for potential errors or oversights.\n\n- c. Confirm or adjust your conclusion if necessary.\n - Be sure to close all reflection sections.\n - Close the thinking section with </thinking>."
|
||||
},
|
||||
{
|
||||
"patternName": "suggest_pattern",
|
||||
"pattern_extract": "# IDENTITY and PURPOSE\nYou are an AI assistant tasked with creating a new feature for a fabric command-line tool. Your primary responsibility is to develop a pattern that suggests appropriate fabric patterns or commands based on user input. You are knowledgeable about fabric commands and understand the need to expand the tool's functionality. Your role involves analyzing user requests, determining the most suitable fabric commands or patterns, and providing helpful suggestions to users.\n\nTake a step back and think step-by-step about how to achieve the best possible results by following the steps below.\n\n# STEPS\n- Analyze the user's input to understand their specific needs and context\n- Determine the appropriate fabric pattern or command based on the user's request\n- Generate a response that suggests the relevant fabric command(s) or pattern(s)\n- Provide explanations or multiple options when applicable\n- If no specific command is found, suggest using `create_pattern`\n\n# OUTPUT INSTRUCTIONS\n- Only output Markdown\n- Provide suggestions for fabric commands or patterns based on the user's input\n- Include explanations or multiple options when appropriate\n- If suggesting `create_pattern`, include instructions for saving and using the new pattern\n- Format the output to be clear and easy to understand for users new to fabric\n- Ensure the response aligns with the goal of making fabric more accessible and user-friendly\n- Ensure you follow ALL these instructions when creating your output\n\n# INPUT\nINPUT:"
|
||||
@@ -907,6 +899,14 @@
|
||||
{
|
||||
"patternName": "generate_code_rules",
|
||||
"pattern_extract": "# IDENTITY AND PURPOSE You are a senior developer and expert prompt engineer. Think ultra hard to distill the following transcription or tutorial in as little set of unique rules as possible intended for best practices guidance in AI assisted coding tools, each rule has to be in one sentence as a direct instruction, avoid explanations and cosmetic language. Output in Markdown, I prefer bullet dash (-). --- # TRANSCRIPT"
|
||||
},
|
||||
{
|
||||
"patternName": "create_story_about_person",
|
||||
"pattern_extract": "You are an expert creative writer specializing in character-driven narratives, and a keen observer of human psychology. Your task is to craft a compelling, realistic short story based on a psychological profile or personal data provided by the user. **Input:** The user will provide a psychological profile or descriptive data about a fictional or real person. This input will be clearly delimited by triple backticks (```). It may include personality traits, habits, fears, motivations, strengths, weaknesses, background information, or specific behavioral patterns. **Task Steps:** 1. **Analyze Profile:** Carefully read and internalize the provided psychological profile. Identify the core personality traits, typical reactions, strengths, and vulnerabilities of the individual. 2. **Brainstorm Challenges:** Based on the analysis from Step 1, generate 3-5 common, relatable, everyday problems or minor dilemmas that a person with this specific profile might genuinely encounter. These challenges should be varied and could span social, professional, personal, or emotional domains. 3. **Develop Strategies:** For each identified problem from Step 2, devise 1-2 specific, plausible methods or strategies that the character, consistent with their psychological profile, would naturally employ (or attempt to employ) to navigate, cope with, or solve these challenges. Consider both internal thought processes and external actions. 4. **Construct Narrative:** Weave these problems and the character's responses into a cohesive, engaging short story (approximately 500-700 words, 3-5 paragraphs). The story should have a clear narrative flow, introducing the character, presenting the challenges, and showing their journey through them. 5. **Maintain Consistency:** Throughout the story, ensure the character's actions, dialogue, internal monologue, and emotional reactions are consistently aligned with the psychological profile provided. The story should feel authentic to the character. **Output Requirements:** * **Format:** A continuous narrative short story. * **Tone:** Empathetic, realistic, and engaging. * **Content:** The story must clearly depict the character facing everyday problems and demonstrate their unique methods and strategies for navigating these challenges, directly reflecting the input profile. * **Length:** Approximately 500-700 words. * **Avoid:** Overly dramatic or fantastical scenarios unless the profile explicitly suggests such a context. Focus on the 'everyday common problems'. **Example of Input Format:** ``` [Psychological Profile/Data Here] ```"
|
||||
},
|
||||
{
|
||||
"patternName": "heal_person",
|
||||
"pattern_extract": "# IDENTITY and PURPOSE You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life. Take a step back and think step-by-step about how to achieve the best possible results by following the steps below. # STEPS - Carefully review the psychological-profile and/or psychology data file provided as input. - Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being. - Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement. - Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile. # OUTPUT INSTRUCTIONS - Only output Markdown. - Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate. - Ensure you follow ALL these instructions when creating your output. # INPUT INPUT:# IDENTITY and PURPOSE You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life. Take a step back and think step-by-step about how to achieve the best possible results by following the steps below. # STEPS - Carefully review the psychological-profile and/or psychology data file provided as input. - Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being. - Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement. - Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile. # OUTPUT INSTRUCTIONS - Only output Markdown. - Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate. - Ensure you follow ALL these instructions when creating your output. # INPUT INPUT:"
|
||||
}
|
||||
]
|
||||
}
|
||||
8 web/pnpm-lock.yaml (generated)
@@ -941,8 +941,8 @@ packages:
resolution: {integrity: sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==}
engines: {node: '>=8'}

devalue@5.1.1:
resolution: {integrity: sha512-maua5KUiapvEwiEAe+XnlZ3Rh0GD+qI1J/nb9vrJc3muPXvcF/8gXYTWF76+5DAqHyDUtOIImEuo0YKE9mshVw==}
devalue@5.3.2:
resolution: {integrity: sha512-UDsjUbpQn9kvm68slnrs+mfxwFkIflOhkanmyabZ8zOYk8SMEIbJ3TK+88g70hSIeytu4y18f0z/hYHMTrXIWw==}

devlop@1.1.0:
resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}
@@ -2704,7 +2704,7 @@ snapshots:
'@types/cookie': 0.6.0
acorn: 8.14.1
cookie: 1.0.2
devalue: 5.1.1
devalue: 5.3.2
esm-env: 1.2.2
kleur: 4.1.5
magic-string: 0.30.17
@@ -3060,7 +3060,7 @@ snapshots:
detect-libc@2.0.4:
optional: true

devalue@5.1.1: {}
devalue@5.3.2: {}

devlop@1.1.0:
dependencies:

@@ -1332,23 +1332,6 @@
|
||||
"DEVELOPMENT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"patternName": "show_fabric_options_markmap",
|
||||
"description": "Visualize Fabric capabilities using Markmap syntax.",
|
||||
"tags": [
|
||||
"VISUALIZE",
|
||||
"DEVELOPMENT"
|
||||
]
|
||||
},
|
||||
{
|
||||
"patternName": "solve_with_cot",
|
||||
"description": "Solve problems using chain-of-thought reasoning.",
|
||||
"tags": [
|
||||
"AI",
|
||||
"ANALYSIS",
|
||||
"LEARNING"
|
||||
]
|
||||
},
|
||||
{
|
||||
"patternName": "suggest_pattern",
|
||||
"description": "Recommend Fabric patterns based on user requirements.",
|
||||
@@ -1871,6 +1854,22 @@
|
||||
"DEVELOPMENT",
|
||||
"AI"
|
||||
]
|
||||
},
|
||||
{
|
||||
"patternName": "create_story_about_person",
|
||||
"description": "Infer everyday challenges and realistic coping strategies from a psychological profile and craft an empathetic 500–700-word story consistent with the character.",
|
||||
"tags": [
|
||||
"WRITING",
|
||||
"SELF"
|
||||
]
|
||||
},
|
||||
{
|
||||
"patternName": "heal_person",
|
||||
"description": "Analyze a psychological profile, pinpoint issues and strengths, and deliver compassionate, structured strategies for spiritual, mental, and life improvement.",
|
||||
"tags": [
|
||||
"ANALYSIS",
|
||||
"SELF"
|
||||
]
|
||||
}
|
||||
]
|
||||
}
|
||||