Mirror of https://github.com/danielmiessler/Fabric.git, synced 2026-01-10 14:58:02 -05:00
Compare commits
19 Commits
| Author | SHA1 | Date |
|---|---|---|
|  | 19b512c3ab |  |
|  | a4ce90970a |  |
|  | 8d2fda3af9 |  |
|  | aa59d58deb |  |
|  | d209ee38c7 |  |
|  | c20be027fe |  |
|  | 3ef3509bfd |  |
|  | 7142b020ef |  |
|  | 1b9f07b525 |  |
|  | dcfc94ca07 |  |
|  | 0e85861a46 |  |
|  | 7c5a040287 |  |
|  | 08eb48c2e7 |  |
|  | e40d4e6623 |  |
|  | 51bd1ebadf |  |
|  | d3de731967 |  |
|  | 458b0a5e1c |  |
|  | b8f64bd554 |  |
|  | 1622a34331 |  |
.github/pull_request_template.md (3 changed lines, vendored)
```diff
@@ -1,9 +1,12 @@
 ## What this Pull Request (PR) does

 Please briefly describe what this PR does.

 ## Related issues

 Please reference any open issues this PR relates to in here.
 If it closes an issue, type `closes #[ISSUE_NUMBER]`.

 ## Screenshots

 Provide any screenshots you may find relevant to facilitate us understanding your PR.
```
.github/workflows/ci.yml (4 changed lines, vendored)
```diff
@@ -20,13 +20,13 @@ jobs:
       contents: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5

       - name: Install Nix
         uses: DeterminateSystems/nix-installer-action@main

       - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: ./go.mod
```
.github/workflows/patterns.yaml (11 changed lines, vendored)
```diff
@@ -11,22 +11,27 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Verify Changes in Patterns Folder
         id: check-changes
         run: |
           git fetch origin
           if git diff --quiet HEAD~1 -- data/patterns; then
             echo "No changes detected in patterns folder."
-            exit 1
+            echo "changes=false" >> $GITHUB_OUTPUT
           else
             echo "Changes detected in patterns folder."
+            echo "changes=true" >> $GITHUB_OUTPUT
           fi

       - name: Zip the Patterns Folder
+        if: steps.check-changes.outputs.changes == 'true'
        run: zip -r patterns.zip data/patterns/

       - name: Upload Patterns Artifact
+        if: steps.check-changes.outputs.changes == 'true'
        uses: actions/upload-artifact@v4
        with:
          name: patterns
```
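The gating above relies on GitHub Actions step outputs: the check step writes a `changes` key to the file behind `$GITHUB_OUTPUT`, and later steps consult it as `steps.check-changes.outputs.changes`. A minimal sketch of that step body, runnable in any git checkout (the fallback assignment is only for local testing, not part of the workflow):

```bash
GITHUB_OUTPUT="${GITHUB_OUTPUT:-/dev/stdout}"   # local-testing fallback; Actions sets this for real
# git diff --quiet exits 0 when nothing under data/patterns changed since HEAD~1
if git diff --quiet HEAD~1 -- data/patterns; then
  echo "changes=false" >> "$GITHUB_OUTPUT"      # downstream zip/upload steps are skipped
else
  echo "changes=true" >> "$GITHUB_OUTPUT"       # downstream zip/upload steps run
fi
```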
.github/workflows/release.yml (138 changed lines, vendored)
```diff
@@ -15,149 +15,39 @@ jobs:
       contents: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: ./go.mod

       - name: Run tests
         run: go test -v ./...

-  get_version:
-    name: Get version
-    runs-on: ubuntu-latest
-    outputs:
-      latest_tag: ${{ steps.get_version.outputs.latest_tag }}
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Get version from source
-        id: get_version
-        shell: bash
-        run: |
-          if [ ! -f "nix/pkgs/fabric/version.nix" ]; then
-            echo "Error: version.nix file not found"
-            exit 1
-          fi
-          version=$(cat nix/pkgs/fabric/version.nix | tr -d '"' | tr -cd '0-9.')
-          if [ -z "$version" ]; then
-            echo "Error: version is empty"
-            exit 1
-          fi
-          if ! echo "$version" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+' > /dev/null; then
-            echo "Error: Invalid version format: $version"
-            exit 1
-          fi
-          echo "latest_tag=v$version" >> $GITHUB_OUTPUT
-
-  build:
-    name: Build binaries for Windows, macOS, and Linux
-    needs: [test, get_version]
-    runs-on: ${{ matrix.os }}
-    permissions:
-      contents: write
-    strategy:
-      matrix:
-        os: [ubuntu-latest, macos-latest, windows-latest]
-        arch: [amd64, arm64]
-        exclude:
-          - os: windows-latest
-            arch: arm64
-
-    steps:
-      - name: Checkout code
-        uses: actions/checkout@v4
-        with:
-          fetch-depth: 0
-
-      - name: Set up Go
-        uses: actions/setup-go@v4
-        with:
-          go-version-file: ./go.mod
-
-      - name: Build binary on Linux and macOS
-        if: matrix.os != 'windows-latest'
-        env:
-          GOOS: ${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}
-          GOARCH: ${{ matrix.arch }}
-        run: |
-          OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
-          go build -o fabric-${OS_NAME}-${{ matrix.arch }} ./cmd/fabric
-
-      - name: Build binary on Windows
-        if: matrix.os == 'windows-latest'
-        env:
-          GOOS: windows
-          GOARCH: ${{ matrix.arch }}
-        run: |
-          go build -o fabric-windows-${{ matrix.arch }}.exe ./cmd/fabric
-
-      - name: Upload build artifact
-        if: matrix.os != 'windows-latest'
-        uses: actions/upload-artifact@v4
-        with:
-          name: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}
-          path: fabric-${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}-${{ matrix.arch }}
-
-      - name: Upload build artifact
-        if: matrix.os == 'windows-latest'
-        uses: actions/upload-artifact@v4
-        with:
-          name: fabric-windows-${{ matrix.arch }}.exe
-          path: fabric-windows-${{ matrix.arch }}.exe
-
-      - name: Create release if it doesn't exist
-        shell: bash
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          if ! gh release view ${{ needs.get_version.outputs.latest_tag }} >/dev/null 2>&1; then
-            gh release create ${{ needs.get_version.outputs.latest_tag }} --title "Release ${{ needs.get_version.outputs.latest_tag }}" --notes "Automated release for ${{ needs.get_version.outputs.latest_tag }}"
-          else
-            echo "Release ${{ needs.get_version.outputs.latest_tag }} already exists."
-          fi
-
-      - name: Upload release artifact
-        if: matrix.os == 'windows-latest'
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-windows-${{ matrix.arch }}.exe
-
-      - name: Upload release artifact
-        if: matrix.os != 'windows-latest'
-        env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          OS_NAME="${{ matrix.os == 'ubuntu-latest' && 'linux' || 'darwin' }}"
-          gh release upload ${{ needs.get_version.outputs.latest_tag }} fabric-${OS_NAME}-${{ matrix.arch }}

   update_release_notes:
-    needs: [build, get_version]
+    # only run in main upstream repo
+    if: ${{ github.repository_owner == 'danielmiessler' }}
+    name: Build & Release with Goreleaser
+    needs: [test]
     runs-on: ubuntu-latest
     permissions:
       contents: write
     steps:
       - name: Checkout code
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

       - name: Set up Go
-        uses: actions/setup-go@v4
+        uses: actions/setup-go@v5
         with:
           go-version-file: ./go.mod

-      - name: Update release description
+      - name: Run GoReleaser
+        uses: goreleaser/goreleaser-action@v6
+        with:
+          distribution: goreleaser
+          args: release --clean
         env:
-          GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
-        run: |
-          go run ./cmd/generate_changelog --sync-db
-          go run ./cmd/generate_changelog --release ${{ needs.get_version.outputs.latest_tag }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
```
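For reference, the deleted `get_version` job derived the release tag from `nix/pkgs/fabric/version.nix` with a small shell pipeline. A standalone sketch of that pipeline, using an illustrative stand-in file and version value:

```bash
echo '"1.4.303"' > version.nix                           # stand-in for nix/pkgs/fabric/version.nix
version=$(cat version.nix | tr -d '"' | tr -cd '0-9.')   # drop quotes, keep only digits and dots
echo "$version" | grep -E '^[0-9]+\.[0-9]+\.[0-9]+'      # sanity-check the x.y.z shape
echo "latest_tag=v$version"                              # -> latest_tag=v1.4.303
```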
```diff
@@ -6,7 +6,6 @@ on:
       - main # Monitor the main branch
     paths-ignore:
       - "data/patterns/**"
-      - "**/*.md"
       - "data/strategies/**"
       - "cmd/generate_changelog/*.db"
       - "cmd/generate_changelog/incoming/*.txt"
@@ -22,12 +21,13 @@ concurrency:

 jobs:
   update-version:
-    if: github.event_name == 'push' && github.ref == 'refs/heads/main'
+    if: >
+      ${{ github.repository_owner == 'danielmiessler' }} &&
+      github.event_name == 'push' && github.ref == 'refs/heads/main'
     runs-on: ubuntu-latest

     steps:
       - name: Checkout repository
-        uses: actions/checkout@v4
+        uses: actions/checkout@v5
         with:
           fetch-depth: 0

@@ -49,12 +49,13 @@ jobs:
         run: |
           latest_tag=$(git tag --sort=-creatordate | head -n 1)
           echo "Latest tag is: $latest_tag"
+          echo "tag=$latest_tag" >> $GITHUB_OUTPUT
           echo "tag=$latest_tag" >> $GITHUB_ENV # Save the latest tag to environment file

       - name: Increment patch version
         id: increment_version
         run: |
-          latest_tag=${{ env.tag }}
+          latest_tag=${{ steps.get_latest_tag.outputs.tag }}
           major=$(echo "$latest_tag" | cut -d. -f1 | sed 's/v//')
           minor=$(echo "$latest_tag" | cut -d. -f2)
           patch=$(echo "$latest_tag" | cut -d. -f3)
@@ -62,19 +63,21 @@ jobs:
           new_version="${major}.${minor}.${new_patch}"
           new_tag="v${new_version}"
           echo "New version is: $new_version"
+          echo "new_version=$new_version" >> $GITHUB_OUTPUT
           echo "new_version=$new_version" >> $GITHUB_ENV # Save the new version to environment file
           echo "New tag is: $new_tag"
+          echo "new_tag=$new_tag" >> $GITHUB_OUTPUT
           echo "new_tag=$new_tag" >> $GITHUB_ENV # Save the new tag to environment file

       - name: Update version.go file
         run: |
           echo "package main" > cmd/fabric/version.go
           echo "" >> cmd/fabric/version.go
-          echo "var version = \"${{ env.new_tag }}\"" >> cmd/fabric/version.go
+          echo "var version = \"${{ steps.increment_version.outputs.new_tag }}\"" >> cmd/fabric/version.go

       - name: Update version.nix file
         run: |
-          echo "\"${{ env.new_version }}\"" > nix/pkgs/fabric/version.nix
+          echo "\"${{ steps.increment_version.outputs.new_version }}\"" > nix/pkgs/fabric/version.nix

       - name: Format source code
         run: |
@@ -88,7 +91,7 @@ jobs:
         env:
           GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
-          go run ./cmd/generate_changelog --process-prs ${{ env.new_tag }}
+          go run ./cmd/generate_changelog --process-prs ${{ steps.increment_version.outputs.new_tag }}
           go run ./cmd/generate_changelog --sync-db
       - name: Commit changes
         run: |
@@ -101,7 +104,7 @@ jobs:
           # and removing the incoming/ directory.

           if ! git diff --staged --quiet; then
-            git commit -m "chore(release): Update version to ${{ env.new_tag }}"
+            git commit -m "chore(release): Update version to ${{ steps.increment_version.outputs.new_tag }}"
           else
             echo "No changes to commit."
           fi
@@ -114,10 +117,10 @@ jobs:

       - name: Create a new tag
         env:
-          GITHUB_TOKEN: ${{ secrets.TAG_PAT }}
+          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
         run: |
-          git tag ${{ env.new_tag }}
-          git push origin ${{ env.new_tag }} # Push the new tag
+          git tag ${{ steps.increment_version.outputs.new_tag }}
+          git push origin ${{ steps.increment_version.outputs.new_tag }} # Push the new tag

       - name: Dispatch event to trigger release workflow
         env:
@@ -127,4 +130,4 @@ jobs:
           -H "Authorization: token $GITHUB_TOKEN" \
           -H "Accept: application/vnd.github.v3+json" \
           https://api.github.com/repos/${{ github.repository }}/dispatches \
-          -d '{"event_type": "tag_created", "client_payload": {"tag": "${{ env.new_tag }}"}}'
+          -d '{"event_type": "tag_created", "client_payload": {"tag": "${{ steps.increment_version.outputs.new_tag }}"}}'
```
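The `Increment patch version` step above splits the tag on dots and bumps the last component. A sketch with an example input tag; note the `new_patch` arithmetic line sits outside the hunk shown above, so it is reconstructed here as an assumption:

```bash
latest_tag="v1.4.303"                                    # example input tag
major=$(echo "$latest_tag" | cut -d. -f1 | sed 's/v//')  # -> 1
minor=$(echo "$latest_tag" | cut -d. -f2)                # -> 4
patch=$(echo "$latest_tag" | cut -d. -f3)                # -> 303
new_patch=$((patch + 1))                                 # reconstructed: not visible in the hunk
new_version="${major}.${minor}.${new_patch}"             # -> 1.4.304
new_tag="v${new_version}"                                # -> v1.4.304
echo "new_tag=$new_tag" >> "$GITHUB_OUTPUT"              # read back as steps.increment_version.outputs.new_tag
```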
.goreleaser.yaml (36 changed lines, new file)
```diff
@@ -0,0 +1,36 @@
+# Read the documentation at https://goreleaser.com
+
+version: 2
+
+project_name: fabric
+before:
+  hooks:
+    # You may remove this if you don't use go modules.
+    - go mod tidy
+    # you may remove this if you don't need go generate
+    # - go generate ./...
+
+builds:
+  - env:
+      - CGO_ENABLED=0
+    goos:
+      - linux
+      - windows
+      - darwin
+    main: ./cmd/fabric
+    binary: fabric
+
+archives:
+  - formats: [tar.gz]
+    # this name template makes the OS and Arch compatible with the results of `uname`.
+    name_template: >-
+      {{ .ProjectName }}_
+      {{- title .Os }}_
+      {{- if eq .Arch "amd64" }}x86_64
+      {{- else if eq .Arch "386" }}i386
+      {{- else }}{{ .Arch }}{{ end }}
+      {{- if .Arm }}v{{ .Arm }}{{ end }}
+    # use zip for windows archives
+    format_overrides:
+      - goos: windows
+        formats: [zip]
```
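Assuming GoReleaser's standard template semantics, where `title .Os` capitalizes the OS name and the `amd64`/`386` branches map architectures to `uname`-style names, this template should produce archive names like the following. The listing is hypothetical, not taken from an actual release:

```bash
ls dist/
# fabric_Linux_x86_64.tar.gz
# fabric_Linux_arm64.tar.gz
# fabric_Darwin_x86_64.tar.gz
# fabric_Darwin_arm64.tar.gz
# fabric_Windows_x86_64.zip    # zip comes from the windows format_overrides entry
```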
.vscode/settings.json (10 changed lines, vendored)
```diff
@@ -13,6 +13,7 @@
     "Behrens",
     "blindspots",
     "Bombal",
+    "Buildx",
     "Callirhoe",
     "Callirrhoe",
     "Cerebras",
@@ -31,11 +32,13 @@
     "Despina",
     "direnv",
     "DMARC",
     "DOCKERHUB",
     "dryrun",
     "dsrp",
     "editability",
     "Eisler",
     "elif",
     "Elister",
     "envrc",
     "Erinome",
     "Errorf",
@@ -62,6 +65,7 @@
     "goopenai",
     "GOPATH",
     "gopkg",
+    "Goreleaser",
     "GOROOT",
     "Graphviz",
     "grokai",
@@ -73,6 +77,7 @@
     "Hormozi's",
     "horts",
     "HTMLURL",
     "imagetools",
     "jaredmontoya",
     "jessevdk",
     "Jina",
@@ -101,13 +106,17 @@
     "mbed",
     "metacharacters",
     "Miessler",
     "modeline",
     "modelines",
     "mpga",
     "nometa",
     "numpy",
     "ollama",
     "ollamaapi",
     "Omri",
     "openaiapi",
     "opencode",
     "opencontainers",
     "openrouter",
     "Orus",
     "osascript",
@@ -156,6 +165,7 @@
     "videoid",
     "webp",
     "WEBVTT",
+    "winget",
     "wipecontext",
     "wipesession",
     "wireframes",
```
CHANGELOG.md (61 changed lines)
```diff
@@ -1,5 +1,66 @@
 # Changelog

+## v1.4.303 (2025-08-28)
+
+### PR [#1736](https://github.com/danielmiessler/Fabric/pull/1736) by [tonymet](https://github.com/tonymet): Winget Publishing and GoReleaser
+
+- Added GoReleaser support for improved package distribution
+- Winget and Docker publishing moved to ksylvan/fabric-packager GitHub repo
+- Hardened release pipeline by gating workflows to upstream owner only
+- Migrated from custom tokens to built-in GITHUB_TOKEN for enhanced security
+- Removed docker-publish-on-tag workflow to reduce duplication and complexity
+- Added ARM binary release support with updated documentation
+
+## v1.4.302 (2025-08-28)
+
+### PR [#1737](https://github.com/danielmiessler/Fabric/pull/1737) by [ksylvan](https://github.com/ksylvan) and [OmriH-Elister](https://github.com/OmriH-Elister): Add New Psychological Analysis Patterns + devalue version bump
+
+- Add create_story_about_person system pattern with narrative workflow
+- Add heal_person system pattern for compassionate healing plans
+- Update pattern_explanations to register new patterns and renumber indices
+- Extend pattern_descriptions with entries, tags, and concise descriptions
+- Bump devalue dependency from 5.1.1 to 5.3.2
+
+## v1.4.301 (2025-08-28)
+
+### PR [#1735](https://github.com/danielmiessler/Fabric/pull/1735) by [ksylvan](https://github.com/ksylvan): Fix Docker Build Path Configuration
+
+- Fix: update Docker workflow to use specific Dockerfile and monitor markdown file changes
+- Add explicit Dockerfile path to Docker build action
+- Remove markdown files from workflow paths-ignore filter
+- Enable CI triggers for documentation file changes
+- Specify Docker build context with custom file location
+
+## v1.4.300 (2025-08-28)
+
+### PR [#1732](https://github.com/danielmiessler/Fabric/pull/1732) by [ksylvan](https://github.com/ksylvan): CI Infra: Changelog Generation Tool + Docker Image Publishing
+
+- Add GitHub Actions workflow to publish Docker images on tags
+- Build multi-arch images with Buildx and QEMU across amd64, arm64
+- Tag images using semver; push to GHCR and Docker Hub
+- Gate patterns workflow steps on detected changes instead of failing
+- Auto-detect GitHub owner and repo from git remote URL
+
+## v1.4.299 (2025-08-27)
+
+### PR [#1731](https://github.com/danielmiessler/Fabric/pull/1731) by [ksylvan](https://github.com/ksylvan): chore: upgrade ollama dependency from v0.9.0 to v0.11.7
+
+- Updated ollama package from version 0.9.0 to 0.11.7
+- Fixed 8 security vulnerabilities including 5 high-severity CVEs that could cause denial of service attacks
+- Patched Ollama server vulnerabilities related to division by zero errors and memory exhaustion
+- Resolved security flaws that allowed malicious GGUF model file uploads to crash the server
+- Enhanced system stability and security posture through comprehensive dependency upgrade
+
+## v1.4.298 (2025-08-27)
+
+### PR [#1730](https://github.com/danielmiessler/Fabric/pull/1730) by [ksylvan](https://github.com/ksylvan): Modernize Dockerfile with Best Practices Implementation
+
+- Remove docker-test framework and simplify production docker setup by eliminating complex testing infrastructure
+- Delete entire docker-test directory including test runner scripts and environment configuration files
+- Implement multi-stage build optimization in production Dockerfile to improve build efficiency
+- Remove docker-compose.yml and start-docker.sh helper scripts to streamline container workflow
+- Update README documentation with cleaner Docker usage instructions and reduced image size benefits
+
 ## v1.4.297 (2025-08-26)

 ### PR [#1729](https://github.com/danielmiessler/Fabric/pull/1729) by [ksylvan](https://github.com/ksylvan): Add GitHub Community Health Documents
```
```diff
@@ -57,6 +57,7 @@ Below are the **new features and capabilities** we've added (newest first):

 ### Recent Major Features

+- [v1.4.303](https://github.com/danielmiessler/fabric/releases/tag/v1.4.303) (Aug 29, 2025) — **New Binary Releases**: Linux ARM and Windows ARM targets. You can run Fabric on the Raspberry Pi and on your Windows Surface!
 - [v1.4.294](https://github.com/danielmiessler/fabric/releases/tag/v1.4.294) (Aug 20, 2025) — **Venice AI Support**: Added the Venice AI provider. Venice is a Privacy-First, Open-Source AI provider. See their ["About Venice"](https://docs.venice.ai/overview/about-venice) page for details.
 - [v1.4.291](https://github.com/danielmiessler/fabric/releases/tag/v1.4.291) (Aug 18, 2025) — **Speech To Text**: Add OpenAI speech-to-text support with `--transcribe-file`, `--transcribe-model`, and `--split-media-file` flags.
 - [v1.4.287](https://github.com/danielmiessler/fabric/releases/tag/v1.4.287) (Aug 16, 2025) — **AI Reasoning**: Add Thinking to Gemini models and introduce `readme_updates` python script
```
```diff
@@ -1,3 +1,3 @@
 package main

-var version = "v1.4.297"
+var version = "v1.4.303"
```
Binary file not shown.
```diff
@@ -3,6 +3,9 @@ package internal
 import (
 	"context"
 	"fmt"
+	"os/exec"
+	"regexp"
+	"strings"

 	"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/cache"
 	"github.com/danielmiessler/fabric/cmd/generate_changelog/internal/config"
@@ -17,17 +20,50 @@ type ReleaseManager struct {
 	repo string
 }

+// getGitHubInfo extracts owner and repo from git remote origin URL
+func getGitHubInfo() (owner, repo string, err error) {
+	cmd := exec.Command("git", "remote", "get-url", "origin")
+	output, err := cmd.Output()
+	if err != nil {
+		return "", "", fmt.Errorf("failed to get git remote URL: %w", err)
+	}
+
+	url := strings.TrimSpace(string(output))
+
+	// Handle both SSH and HTTPS URLs
+	// SSH: git@github.com:owner/repo.git
+	// HTTPS: https://github.com/owner/repo.git
+	var re *regexp.Regexp
+	if strings.HasPrefix(url, "git@") {
+		re = regexp.MustCompile(`git@github\.com:([^/]+)/([^/.]+)(?:\.git)?`)
+	} else {
+		re = regexp.MustCompile(`https://github\.com/([^/]+)/([^/.]+)(?:\.git)?`)
+	}
+
+	matches := re.FindStringSubmatch(url)
+	if len(matches) < 3 {
+		return "", "", fmt.Errorf("invalid GitHub URL format: %s", url)
+	}
+
+	return matches[1], matches[2], nil
+}
+
 func NewReleaseManager(cfg *config.Config) (*ReleaseManager, error) {
 	cache, err := cache.New(cfg.CacheFile)
 	if err != nil {
 		return nil, fmt.Errorf("failed to create cache: %w", err)
 	}

+	owner, repo, err := getGitHubInfo()
+	if err != nil {
+		return nil, fmt.Errorf("failed to get GitHub repository info: %w", err)
+	}
+
 	return &ReleaseManager{
 		cache:       cache,
 		githubToken: cfg.GitHubToken,
-		owner:       "danielmiessler",
-		repo:        "fabric",
+		owner:       owner,
+		repo:        repo,
 	}, nil
 }
```
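The two origin URL shapes `getGitHubInfo` handles can be checked from a shell; both resolve to the same owner/repo pair (the values shown are this repository's):

```bash
git remote get-url origin
# SSH form:    git@github.com:danielmiessler/fabric.git     -> owner=danielmiessler, repo=fabric
# HTTPS form:  https://github.com/danielmiessler/fabric.git -> owner=danielmiessler, repo=fabric
```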
data/patterns/create_story_about_person/system.md (26 changed lines, new file)
````diff
@@ -0,0 +1,26 @@
+You are an expert creative writer specializing in character-driven narratives, and a keen observer of human psychology. Your task is to craft a compelling, realistic short story based on a psychological profile or personal data provided by the user.
+
+**Input:**
+The user will provide a psychological profile or descriptive data about a fictional or real person. This input will be clearly delimited by triple backticks (```). It may include personality traits, habits, fears, motivations, strengths, weaknesses, background information, or specific behavioral patterns.
+
+**Task Steps:**
+
+1. **Analyze Profile:** Carefully read and internalize the provided psychological profile. Identify the core personality traits, typical reactions, strengths, and vulnerabilities of the individual.
+2. **Brainstorm Challenges:** Based on the analysis from Step 1, generate 3-5 common, relatable, everyday problems or minor dilemmas that a person with this specific profile might genuinely encounter. These challenges should be varied and could span social, professional, personal, or emotional domains.
+3. **Develop Strategies:** For each identified problem from Step 2, devise 1-2 specific, plausible methods or strategies that the character, consistent with their psychological profile, would naturally employ (or attempt to employ) to navigate, cope with, or solve these challenges. Consider both internal thought processes and external actions.
+4. **Construct Narrative:** Weave these problems and the character's responses into a cohesive, engaging short story (approximately 500-700 words, 3-5 paragraphs). The story should have a clear narrative flow, introducing the character, presenting the challenges, and showing their journey through them.
+5. **Maintain Consistency:** Throughout the story, ensure the character's actions, dialogue, internal monologue, and emotional reactions are consistently aligned with the psychological profile provided. The story should feel authentic to the character.
+
+**Output Requirements:**
+
+* **Format:** A continuous narrative short story.
+* **Tone:** Empathetic, realistic, and engaging.
+* **Content:** The story must clearly depict the character facing everyday problems and demonstrate their unique methods and strategies for navigating these challenges, directly reflecting the input profile.
+* **Length:** Approximately 500-700 words.
+* **Avoid:** Overly dramatic or fantastical scenarios unless the profile explicitly suggests such a context. Focus on the 'everyday common problems'.
+
+**Example of Input Format:**
+
+```
+[Psychological Profile/Data Here]
+```
````
data/patterns/heal_person/system.md (53 changed lines, new file)
```diff
@@ -0,0 +1,53 @@
+# IDENTITY and PURPOSE
+
+You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life.
+
+Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
+
+# STEPS
+
+- Carefully review the psychological-profile and/or psychology data file provided as input.
+
+- Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being.
+
+- Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement.
+
+- Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile.
+
+# OUTPUT INSTRUCTIONS
+
+- Only output Markdown.
+
+- Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate.
+
+- Ensure you follow ALL these instructions when creating your output.
+
+# INPUT
+
+INPUT:# IDENTITY and PURPOSE
+
+You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life.
+
+Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
+
+# STEPS
+
+- Carefully review the psychological-profile and/or psychology data file provided as input.
+
+- Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being.
+
+- Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement.
+
+- Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile.
+
+# OUTPUT INSTRUCTIONS
+
+- Only output Markdown.
+
+- Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate.
+
+- Ensure you follow ALL these instructions when creating your output.
+
+# INPUT
+
+INPUT:
```
```diff
@@ -88,136 +88,138 @@
 84. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
 85. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
 86. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
-87. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
-88. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
-89. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
-90. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
-91. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
-92. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
-93. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
-94. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
-95. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
-96. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
-97. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
-98. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
-99. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
-100. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
-101. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
-102. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
-103. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
-104. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
-105. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
-106. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
-107. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
-108. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
-109. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
-110. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
-111. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
-112. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
-113. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
-114. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
-115. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
-116. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
-117. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the INSIGHTS section, also IDEAS section.
-118. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
-119. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
-120. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
-121. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
-122. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
-123. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
-124. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
-125. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
-126. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
-127. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
-128. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
-129. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
-130. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
-131. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
-132. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
-133. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
-134. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
-135. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
-136. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
-137. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
-138. **extract_videoid**: Extracts and outputs the video ID from any given URL.
-139. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
-140. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
-141. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
-142. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
-143. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
-144. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
-145. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
-146. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
-147. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
-148. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
-149. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
-150. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
-151. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
-152. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
-153. **identify_job_stories**: Identifies key job stories or requirements for roles.
-154. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
-155. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
-156. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
-157. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning. skills.
-158. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
-159. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
-160. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
-161. **official_pattern_template**: Template to use if you want to create new fabric patterns.
-162. **prepare_7s_strategy**: Prepares a comprehensive briefing document from 7S's strategy capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
-163. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
-164. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
-165. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
-166. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
-167. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
-168. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
-169. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
-170. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
-171. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
-172. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
-173. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
-174. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
-175. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
-176. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
-177. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
-178. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
-179. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
-180. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
-181. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
-182. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
-183. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
-184. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
-185. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
-186. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
-187. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
-188. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
-189. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
-190. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
-191. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
-192. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
-193. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
-194. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
-195. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
-196. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
-197. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
-198. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
-199. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
-200. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
-201. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
-202. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
-203. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
-204. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
-205. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
-206. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
-207. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
-208. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
-209. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
-210. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
-211. **write_essay**: Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
-212. **write_essay_pg**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
-213. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
-214. **write_latex**: Generates syntactically correct LaTeX code for a new.tex document, ensuring proper formatting and compatibility with pdflatex.
-215. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
-216. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
-217. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
-218. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
-219. **youtube_summary**: Create concise, timestamped Youtube video summaries that highlight key points.
+87. **create_story_about_person**: Creates compelling, realistic short stories based on psychological profiles, showing how characters navigate everyday problems using strategies consistent with their personality traits.
+88. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
+89. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
+90. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
+91. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
+92. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
+93. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
+94. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
+95. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
+96. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
+97. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
+98. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
+99. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
+100. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
+101. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
+102. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
+103. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
+104. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
+105. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
+106. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
+107. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
+108. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
+109. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
+110. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
+111. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
+112. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
+113. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
+114. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
+115. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
+116. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
+117. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
+118. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the INSIGHTS section, also IDEAS section.
+119. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
+120. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
+121. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
+122. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
+123. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
+124. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
+125. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
+126. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
+127. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
+128. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
+129. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
+130. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
+131. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
+132. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
+133. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
+134. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
+135. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
+136. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
+137. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
+138. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
+139. **extract_videoid**: Extracts and outputs the video ID from any given URL.
+140. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
+141. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
+142. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
+143. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
+144. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
+145. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
+146. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
+147. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
+148. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
+149. **heal_person**: Develops a comprehensive plan for spiritual and mental healing based on psychological profiles, providing personalized recommendations for mental health improvement and overall life enhancement.
+150. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
```
|
||||
151. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
|
||||
152. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
|
||||
153. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
|
||||
154. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
|
||||
155. **identify_job_stories**: Identifies key job stories or requirements for roles.
|
||||
156. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
|
||||
157. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
|
||||
158. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
|
||||
159. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning. skills.
|
||||
160. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
|
||||
161. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
162. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
|
||||
163. **official_pattern_template**: Template to use if you want to create new fabric patterns.
|
||||
164. **prepare_7s_strategy**: Prepares a comprehensive briefing document from 7S's strategy capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
|
||||
165. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
|
||||
166. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
|
||||
167. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
|
||||
168. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
|
||||
169. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
|
||||
170. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
|
||||
171. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
|
||||
172. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
|
||||
173. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
|
||||
174. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
|
||||
175. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
|
||||
176. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
|
||||
177. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
|
||||
178. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
|
||||
179. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
|
||||
180. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
|
||||
181. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
|
||||
182. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
|
||||
183. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
|
||||
184. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
|
||||
185. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
|
||||
186. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
|
||||
187. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
|
||||
188. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
|
||||
189. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
|
||||
190. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
|
||||
191. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
|
||||
192. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
|
||||
193. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
|
||||
194. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
|
||||
195. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
|
||||
196. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
|
||||
197. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
|
||||
198. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
|
||||
199. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
|
||||
200. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
|
||||
201. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
|
||||
202. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
|
||||
203. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
|
||||
204. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
|
||||
205. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
|
||||
206. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
|
||||
207. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
|
||||
208. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
|
||||
209. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
|
||||
210. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
|
||||
211. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
|
||||
212. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
|
||||
213. **write_essay**: Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
|
||||
214. **write_essay_pg**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
|
||||
215. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
|
||||
216. **write_latex**: Generates syntactically correct LaTeX code for a new.tex document, ensuring proper formatting and compatibility with pdflatex.
|
||||
217. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
|
||||
218. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
|
||||
219. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
|
||||
220. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
|
||||
221. **youtube_summary**: Create concise, timestamped Youtube video summaries that highlight key points.
|
||||
|
||||
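Every pattern above is invoked by name with the fabric CLI's `--pattern`/`-p` flag, reading input from stdin. A minimal sketch, assuming fabric is installed and configured (the input file names here are placeholders):

```bash
# Summarize an article with the `summarize` pattern
cat article.txt | fabric --pattern summarize

# Produce meeting minutes from a transcript
cat transcript.txt | fabric -p summarize_meeting > minutes.md
```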
go.mod

@@ -21,7 +21,7 @@ require (
github.com/joho/godotenv v1.5.1
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51
github.com/mattn/go-sqlite3 v1.14.28
github.com/ollama/ollama v0.9.0
github.com/ollama/ollama v0.11.7
github.com/openai/openai-go v1.8.2
github.com/otiai10/copy v1.14.1
github.com/pkg/errors v0.9.1
go.sum

@@ -180,8 +180,8 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/ollama/ollama v0.9.0 h1:GvdGhi8G/QMnFrY0TMLDy1bXua+Ify8KTkFe4ZY/OZs=
github.com/ollama/ollama v0.9.0/go.mod h1:aio9yQ7nc4uwIbn6S0LkGEPgn8/9bNQLL1nHuH+OcD0=
github.com/ollama/ollama v0.11.7 h1:CuYjaJ/YEnvLDpJocJbbVdpdVFyGA/OP6lKFyzZD4dI=
github.com/ollama/ollama v0.11.7/go.mod h1:9+1//yWPsDE2u+l1a5mpaKrYw4VdnSsRU3ioq5BvMms=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/openai/openai-go v1.8.2 h1:UqSkJ1vCOPUpz9Ka5tS0324EJFEuOvMc+lA/EarJWP8=

@@ -224,8 +224,8 @@ schema = 3
version = "v1.0.2"
hash = "sha256-+W9EIW7okXIXjWEgOaMh58eLvBZ7OshW2EhaIpNLSBU="
[mod."github.com/ollama/ollama"]
version = "v0.9.0"
hash = "sha256-r2eU+kMG3tuJy2B43RXsfmeltzM9t05NEmNiJAW5qr4="
version = "v0.11.7"
hash = "sha256-3Wn1JWmil0aQQ2I/r398HbnUsi8ADoroqNyPziuxn/c="
[mod."github.com/openai/openai-go"]
version = "v1.8.2"
hash = "sha256-O8aV3zEj6o8kIlzlkYaTW4RzvwR3qNUBYiN8SuTM1R0="

@@ -1 +1 @@
"1.4.297"
"1.4.303"
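The ollama bump shown above across `go.mod`, `go.sum`, the Nix dependency pin, and the version file is the footprint of a routine module upgrade. A hedged sketch of how such a bump is typically produced (the `gomod2nix` step is an assumption based on the `[mod."…"]` and `schema = 3` entries, which match gomod2nix's file format):

```bash
# Upgrade the module and sync go.mod/go.sum
go get github.com/ollama/ollama@v0.11.7
go mod tidy

# Regenerate the Nix pin so the sha256- hashes match (assumes gomod2nix is in use)
gomod2nix generate
```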
@@ -1,116 +0,0 @@

# Docker Test Environment for API Configuration Fix

This directory contains a Docker-based testing setup for fixing the issue where Fabric calls Ollama and Bedrock APIs even when not configured. This addresses the problem where unconfigured services show error messages during model listing.

## Quick Start

```bash
# Run all tests
./scripts/docker-test/test-runner.sh

# Interactive mode - pick which test to run
./scripts/docker-test/test-runner.sh -i

# Run specific test case
./scripts/docker-test/test-runner.sh gemini-only

# Shell into test environment
./scripts/docker-test/test-runner.sh -s gemini-only

# Build image only (for development)
./scripts/docker-test/test-runner.sh -b

# Show help
./scripts/docker-test/test-runner.sh -h
```

## Test Cases

1. **no-config**: No APIs configured
2. **gemini-only**: Only Gemini configured (reproduces original issue #1195)
3. **openai-only**: Only OpenAI configured
4. **ollama-only**: Only Ollama configured
5. **bedrock-only**: Only Bedrock configured
6. **mixed**: Multiple APIs configured (Gemini + OpenAI + Ollama)

## Environment Files

Each test case has a corresponding environment file in `scripts/docker-test/env/`:

- `env.no-config` - Empty configuration
- `env.gemini-only` - Only Gemini API key
- `env.openai-only` - Only OpenAI API key
- `env.ollama-only` - Only Ollama URL
- `env.bedrock-only` - Only Bedrock configuration
- `env.mixed` - Multiple API configurations

These files are volume-mounted into the Docker container and persist changes made with `fabric -S`.
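For illustration, a single-provider environment file can be as small as one line; the exact key names come from `fabric -S`, so the variable below is an assumption rather than the checked-in file:

```bash
# scripts/docker-test/env/env.gemini-only (illustrative; key name assumed)
GEMINI_API_KEY=your-gemini-api-key
```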
## Interactive Mode & Shell Access

The interactive mode (`-i`) provides several options:

```text
Available test cases:

1) No APIs configured (no-config)
2) Only Gemini configured (gemini-only)
3) Only OpenAI configured (openai-only)
4) Only Ollama configured (ollama-only)
5) Only Bedrock configured (bedrock-only)
6) Mixed configuration (mixed)
7) Run all tests
0) Exit

Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)
```

### Shell Mode

- Use `1!`, `2!`, etc. to shell into any test environment
- Run `fabric -S` to configure APIs interactively
- Run `fabric --listmodels` or `fabric -L` to test model listing
- Changes persist in the environment files
- Type `exit` to return to test runner

## Expected Results

**Before Fix:**

- `no-config` and `gemini-only` tests show Ollama connection errors
- Tests show Bedrock authentication errors when BEDROCK_AWS_REGION not set
- Error: `Ollama Get "http://localhost:11434/api/tags": dial tcp...`
- Error: `Bedrock failed to list foundation models...`

**After Fix:**

- Clean output with no error messages for unconfigured services
- Only configured services appear in model listings
- Ollama only initialized when `OLLAMA_API_URL` is set
- Bedrock only initialized when `BEDROCK_AWS_REGION` is set

## Implementation Details

- **Volume-mounted configs**: Environment files are mounted to `/home/testuser/.config/fabric/.env`
- **Persistent state**: Configuration changes survive between test runs
- **Single Docker image**: Built once from `scripts/docker-test/base/Dockerfile`, reused for all tests
- **Isolated environments**: Each test uses its own environment file
- **Cross-platform**: Works on macOS, Linux, and Windows with Docker

## Development Workflow

1. Make code changes to fix API initialization logic
2. Run `./scripts/docker-test/test-runner.sh no-config` to test the main issue
3. Use `./scripts/docker-test/test-runner.sh -i` for interactive testing
4. Shell into environments (`1!`, `2!`, etc.) to debug specific configurations
5. Run all tests before submitting PR: `./scripts/docker-test/test-runner.sh`

## Architecture

The fix involves:

1. **Ollama**: Override `IsConfigured()` method to check for `OLLAMA_API_URL` env var
2. **Bedrock**: Modify `hasAWSCredentials()` to require `BEDROCK_AWS_REGION`
3. **Plugin Registry**: Only initialize providers when properly configured

This prevents unnecessary API calls and eliminates confusing error messages for users.
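As a rough sketch of what the fix means in practice, the same conditional initialization can be exercised outside Docker; this assumes the two variables are honored from the process environment the same way as `.env` entries:

```bash
# No Ollama URL configured: --listmodels should print no Ollama errors
go run ./cmd/fabric --listmodels

# Ollama configured: its models should now appear in the listing
OLLAMA_API_URL=http://localhost:11434 go run ./cmd/fabric --listmodels
```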
@@ -1,30 +0,0 @@

FROM golang:1.24-alpine AS builder

WORKDIR /app
COPY go.mod go.sum ./
RUN go mod download

COPY ./cmd/fabric ./cmd/fabric
COPY ./internal ./internal
RUN go build -o fabric ./cmd/fabric

FROM alpine:latest
RUN apk --no-cache add ca-certificates

# Create a test user
RUN adduser -D -s /bin/sh testuser

# Switch to test user
USER testuser
WORKDIR /home/testuser

# Set environment variables for the test user
ENV HOME=/home/testuser
ENV USER=testuser

COPY --from=builder /app/fabric .

# Create fabric config directory and empty .env file
RUN mkdir -p .config/fabric && touch .config/fabric/.env

ENTRYPOINT ["./fabric"]
@@ -1,235 +0,0 @@

#!/usr/bin/env bash

set -e

# Get the directory where this script is located
top_dir="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
base_name="$(basename "$top_dir")"
cd "$top_dir"/../.. || exit 1

# Check if bash version supports associative arrays
if [[ ${BASH_VERSION%%.*} -lt 4 ]]; then
    echo "This script requires bash 4.0 or later for associative arrays."
    echo "Current version: $BASH_VERSION"
    exit 1
fi

IMAGE_NAME="fabric-test-setup"
ENV_DIR="scripts/${base_name}/env"

# Test case descriptions
declare -A test_descriptions=(
    ["no-config"]="No APIs configured"
    ["gemini-only"]="Only Gemini configured (reproduces original issue)"
    ["openai-only"]="Only OpenAI configured"
    ["ollama-only"]="Only Ollama configured"
    ["bedrock-only"]="Only Bedrock configured"
    ["mixed"]="Mixed configuration (Gemini + OpenAI + Ollama)"
)

# Test case order for consistent display
test_order=("no-config" "gemini-only" "openai-only" "ollama-only" "bedrock-only" "mixed")

build_image() {
    echo "=== Building Docker image ==="
    docker build -f "${top_dir}/base/Dockerfile" -t "$IMAGE_NAME" .
    echo
}

check_env_file() {
    local test_name="$1"
    local env_file="$ENV_DIR/env.$test_name"

    if [[ ! -f "$env_file" ]]; then
        echo "Error: Environment file not found: $env_file"
        exit 1
    fi
}

run_test() {
    local test_name="$1"
    local description="${test_descriptions[$test_name]}"
    local env_file="$ENV_DIR/env.$test_name"

    check_env_file "$test_name"

    echo "===================="
    echo "Test: $description"
    echo "Config: $test_name"
    echo "Env file: $env_file"
    echo "===================="

    echo "Running test..."
    if docker run --rm \
        -e HOME=/home/testuser \
        -e USER=testuser \
        -v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env:ro" \
        "$IMAGE_NAME" --listmodels 2>&1; then
        echo "✅ Test completed"
    else
        echo "❌ Test failed"
    fi
    echo
}

shell_into_env() {
    local test_name="$1"
    local description="${test_descriptions[$test_name]}"
    local env_file="$ENV_DIR/env.$test_name"

    check_env_file "$test_name"

    echo "===================="
    echo "Shelling into: $description"
    echo "Config: $test_name"
    echo "Env file: $env_file"
    echo "===================="
    echo "You can now run 'fabric -S' to configure, or 'fabric --listmodels' or 'fabric -L' to test."
    echo "Changes to .env will persist in $env_file"
    echo "Type 'exit' to return to the test runner."
    echo

    docker run -it --rm \
        -e HOME=/home/testuser \
        -e USER=testuser \
        -v "$(pwd)/$env_file:/home/testuser/.config/fabric/.env" \
        --entrypoint=/bin/sh \
        "$IMAGE_NAME"
}

interactive_mode() {
    echo "=== Interactive Mode ==="
    echo "Available test cases:"
    echo
    local i=1
    local cases=()
    for test_name in "${test_order[@]}"; do
        echo "$i) ${test_descriptions[$test_name]} ($test_name)"
        cases[i]="$test_name"
        ((i++))
    done
    echo "$i) Run all tests"
    echo "0) Exit"
    echo
    echo "Add '!' after number to shell into test environment (e.g., '1!' to shell into no-config)"
    echo

    while true; do
        read -r -p "Select test case (0-$i) [or 1!, etc. to shell into test environment]: " choice

        # Check for shell mode (! suffix)
        local shell_mode=false
        if [[ "$choice" == *"!" ]]; then
            shell_mode=true
            choice="${choice%!}" # Remove the ! suffix
        fi

        if [[ "$choice" == "0" ]]; then
            if [[ "$shell_mode" == true ]]; then
                echo "Cannot shell into exit option."
                continue
            fi
            echo "Exiting..."
            exit 0
        elif [[ "$choice" == "$i" ]]; then
            if [[ "$shell_mode" == true ]]; then
                echo "Cannot shell into 'run all tests' option."
                continue
            fi
            echo "Running all tests..."
            run_all_tests
            break
        elif [[ "$choice" -ge 1 && "$choice" -lt "$i" ]]; then
            local selected_test="${cases[$choice]}"
            if [[ "$shell_mode" == true ]]; then
                echo "Shelling into: ${test_descriptions[$selected_test]}"
                shell_into_env "$selected_test"
            else
                echo "Running: ${test_descriptions[$selected_test]}"
                run_test "$selected_test"
            fi

            read -r -p "Continue testing? (y/n): " again
            if [[ "$again" != "y" && "$again" != "Y" ]]; then
                break
            fi
            echo
        else
            echo "Invalid choice. Please select 0-$i (optionally with '!' for shell mode)."
        fi
    done
}

run_all_tests() {
    echo "=== Testing PR #1645: Conditional API initialization ==="
    echo

    for test_name in "${test_order[@]}"; do
        run_test "$test_name"
    done

    echo "=== Test run complete ==="
    echo "Review the output above to check:"
    echo "1. No Ollama connection errors when OLLAMA_URL not set"
    echo "2. No Bedrock authentication errors when BEDROCK_AWS_REGION not set"
    echo "3. Only configured services appear in model listings"
}

show_help() {
    echo "Usage: $0 [OPTIONS] [TEST_CASE]"
    echo
    echo "Test PR #1645 conditional API initialization"
    echo
    echo "Options:"
    echo "  -h, --help         Show this help message"
    echo "  -i, --interactive  Run in interactive mode"
    echo "  -b, --build-only   Build image only, don't run tests"
    echo "  -s, --shell TEST   Shell into test environment"
    echo
    echo "Test cases:"
    for test_name in "${test_order[@]}"; do
        echo "  $test_name: ${test_descriptions[$test_name]}"
    done
    echo
    echo "Examples:"
    echo "  $0                  # Run all tests"
    echo "  $0 -i               # Interactive mode"
    echo "  $0 gemini-only      # Run specific test"
    echo "  $0 -s gemini-only   # Shell into gemini-only environment"
    echo "  $0 -b               # Build image only"
    echo
    echo "Environment files are located in $ENV_DIR/ and can be edited directly."
}

# Parse command line arguments
if [[ $# -eq 0 ]]; then
    build_image
    run_all_tests
elif [[ "$1" == "-h" || "$1" == "--help" ]]; then
    show_help
elif [[ "$1" == "-i" || "$1" == "--interactive" ]]; then
    build_image
    interactive_mode
elif [[ "$1" == "-b" || "$1" == "--build-only" ]]; then
    build_image
elif [[ "$1" == "-s" || "$1" == "--shell" ]]; then
    if [[ -z "$2" ]]; then
        echo "Error: -s/--shell requires a test case name"
        echo "Use -h for help."
        exit 1
    fi
    if [[ -z "${test_descriptions[$2]}" ]]; then
        echo "Error: Unknown test case: $2"
        echo "Use -h for help."
        exit 1
    fi
    build_image
    shell_into_env "$2"
elif [[ -n "${test_descriptions[$1]}" ]]; then
    build_image
    run_test "$1"
else
    echo "Unknown test case or option: $1"
    echo "Use -h for help."
    exit 1
fi
@@ -1,41 +1,28 @@

# Use official golang image as builder
FROM golang:1.24.2-alpine AS builder
# syntax=docker/dockerfile:1

# Set working directory
WORKDIR /app
FROM golang:1.24-alpine AS builder

WORKDIR /src

# Install build dependencies
RUN apk add --no-cache git

# Copy go mod and sum files
COPY go.mod go.sum ./

# Download dependencies
RUN go mod download

# Copy source code
COPY . .

# Build the application
RUN CGO_ENABLED=0 GOOS=linux go build -o fabric ./cmd/fabric
RUN CGO_ENABLED=0 GOOS=linux go build -ldflags="-s -w" -o /fabric ./cmd/fabric

# Use scratch as final base image
FROM alpine:latest

# Copy the binary from builder
COPY --from=builder /app/fabric /fabric
LABEL org.opencontainers.image.description="A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details."

# Copy patterns directory
COPY patterns /patterns
RUN apk add --no-cache ca-certificates \
    && mkdir -p /root/.config/fabric

# Ensure clean config directory and copy ENV file
RUN rm -rf /root/.config/fabric && \
    mkdir -p /root/.config/fabric
COPY ENV /root/.config/fabric/.env
COPY --from=builder /fabric /usr/local/bin/fabric

# Add debug commands
RUN ls -la /root/.config/fabric/

# Expose port 8080
EXPOSE 8080

# Run the binary with debug output
ENTRYPOINT ["/fabric"]
CMD ["--serve"]
ENTRYPOINT ["fabric"]
@@ -1,40 +1,60 @@

# Docker Deployment
# Fabric Docker Image

This directory contains Docker configuration files for running Fabric in containers.
This directory provides a simple Docker setup for running the [Fabric](https://github.com/danielmiessler/fabric) CLI.

## Files
## Build

- `Dockerfile` - Main Docker build configuration
- `docker-compose.yml` - Docker Compose stack configuration
- `start-docker.sh` - Helper script to start the stack
- `README.md` - This documentation

## Quick Start
Build the image from the repository root:

```bash
# Start the Docker stack
./start-docker.sh

# Or manually with docker-compose
docker-compose up -d

# View logs
docker-compose logs -f

# Stop the stack
docker-compose down
docker build -t fabric -f scripts/docker/Dockerfile .
```

## Building
## Persisting configuration

Fabric stores its configuration in `~/.config/fabric/.env`. Mount this path to keep your settings on the host.

### Using a host directory

```bash
# Build the Docker image
docker build -t fabric .

# Or use docker-compose
docker-compose build
mkdir -p $HOME/.fabric-config
# Run setup to create the .env and download patterns
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric fabric --setup
```

## Configuration
Subsequent runs can reuse the same directory:

Make sure to configure your environment variables and API keys before running the Docker stack. See the main README.md for setup instructions.
```bash
docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric fabric -p your-pattern
```

### Mounting a single .env file

If you only want to persist the `.env` file:

```bash
# assuming .env exists in the current directory
docker run --rm -it -v $PWD/.env:/root/.config/fabric/.env fabric -p your-pattern
```

## Running the server

Expose port 8080 to use Fabric's REST API:

```bash
docker run --rm -it -p 8080:8080 -v $HOME/.fabric-config:/root/.config/fabric fabric --serve
```

The API will be available at `http://localhost:8080`.
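Once the server is up, a quick smoke test from the host might look like the following; the route is an assumption for illustration, so check the REST API documentation for the actual endpoints:

```bash
# List available pattern names over the REST API (endpoint path assumed)
curl http://localhost:8080/patterns/names
```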
## Multi-arch builds and GHCR packages

For multi-arch Docker builds (such as those used for GitHub Container Registry packages), the description should be set via annotations in the manifest instead of the Dockerfile LABEL. When building multi-arch images, ensure the build configuration includes:

```json
"annotations": {
  "org.opencontainers.image.description": "A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details."
}
```

This ensures that GHCR packages display the proper description.
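A sketch of applying that annotation with `docker buildx`; the `index:` prefix targets the image index rather than a single platform manifest, and the platforms and tag here are examples:

```bash
docker buildx build \
  --platform linux/amd64,linux/arm64 \
  --annotation "index:org.opencontainers.image.description=A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details." \
  -f scripts/docker/Dockerfile \
  -t ghcr.io/danielmiessler/fabric:latest \
  --push .
```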
@@ -1,11 +0,0 @@

version: '3.8'

services:
  fabric-api:
    build: .
    ports:
      - "8080:8080"
    volumes:
      - ./ENV:/root/.config/fabric/.env:ro
    environment:
      - GIN_MODE=release
@@ -1,11 +0,0 @@

#!/bin/bash

# Helper script to start the Fabric Docker stack

echo "Starting Fabric Docker stack..."
cd "$(dirname "$0")"
docker-compose up -d

echo "Fabric is now running!"
echo "Check logs with: docker-compose logs -f"
echo "Stop with: docker-compose down"
@@ -1871,6 +1871,22 @@

        "DEVELOPMENT",
        "AI"
      ]
    },
    {
      "patternName": "create_story_about_person",
      "description": "Infer everyday challenges and realistic coping strategies from a psychological profile and craft an empathetic 500–700-word story consistent with the character.",
      "tags": [
        "WRITING",
        "SELF"
      ]
    },
    {
      "patternName": "heal_person",
      "description": "Analyze a psychological profile, pinpoint issues and strengths, and deliver compassionate, structured strategies for spiritual, mental, and life improvement.",
      "tags": [
        "ANALYSIS",
        "SELF"
      ]
    }
  ]
}
@@ -907,6 +907,14 @@

    {
      "patternName": "generate_code_rules",
      "pattern_extract": "# IDENTITY AND PURPOSE You are a senior developer and expert prompt engineer. Think ultra hard to distill the following transcription or tutorial in as little set of unique rules as possible intended for best practices guidance in AI assisted coding tools, each rule has to be in one sentence as a direct instruction, avoid explanations and cosmetic language. Output in Markdown, I prefer bullet dash (-). --- # TRANSCRIPT"
    },
    {
      "patternName": "create_story_about_person",
      "pattern_extract": "You are an expert creative writer specializing in character-driven narratives, and a keen observer of human psychology. Your task is to craft a compelling, realistic short story based on a psychological profile or personal data provided by the user. **Input:** The user will provide a psychological profile or descriptive data about a fictional or real person. This input will be clearly delimited by triple backticks (```). It may include personality traits, habits, fears, motivations, strengths, weaknesses, background information, or specific behavioral patterns. **Task Steps:** 1. **Analyze Profile:** Carefully read and internalize the provided psychological profile. Identify the core personality traits, typical reactions, strengths, and vulnerabilities of the individual. 2. **Brainstorm Challenges:** Based on the analysis from Step 1, generate 3-5 common, relatable, everyday problems or minor dilemmas that a person with this specific profile might genuinely encounter. These challenges should be varied and could span social, professional, personal, or emotional domains. 3. **Develop Strategies:** For each identified problem from Step 2, devise 1-2 specific, plausible methods or strategies that the character, consistent with their psychological profile, would naturally employ (or attempt to employ) to navigate, cope with, or solve these challenges. Consider both internal thought processes and external actions. 4. **Construct Narrative:** Weave these problems and the character's responses into a cohesive, engaging short story (approximately 500-700 words, 3-5 paragraphs). The story should have a clear narrative flow, introducing the character, presenting the challenges, and showing their journey through them. 5. **Maintain Consistency:** Throughout the story, ensure the character's actions, dialogue, internal monologue, and emotional reactions are consistently aligned with the psychological profile provided. The story should feel authentic to the character. **Output Requirements:** * **Format:** A continuous narrative short story. * **Tone:** Empathetic, realistic, and engaging. * **Content:** The story must clearly depict the character facing everyday problems and demonstrate their unique methods and strategies for navigating these challenges, directly reflecting the input profile. * **Length:** Approximately 500-700 words. * **Avoid:** Overly dramatic or fantastical scenarios unless the profile explicitly suggests such a context. Focus on the 'everyday common problems'. **Example of Input Format:** ``` [Psychological Profile/Data Here] ```"
    },
    {
      "patternName": "heal_person",
      "pattern_extract": "# IDENTITY and PURPOSE You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life. Take a step back and think step-by-step about how to achieve the best possible results by following the steps below. # STEPS - Carefully review the psychological-profile and/or psychology data file provided as input. - Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being. - Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement. - Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile. # OUTPUT INSTRUCTIONS - Only output Markdown. - Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate. - Ensure you follow ALL these instructions when creating your output. # INPUT INPUT:# IDENTITY and PURPOSE You are an AI assistant whose primary responsibility is to interpret and analyze psychological profiles and/or psychology data files provided as input. Your role is to carefully process this data and use your expertise to develop a tailored plan aimed at spiritual and mental healing, as well as overall life improvement for the subject. You must approach each case with sensitivity, applying psychological knowledge and holistic strategies to create actionable, personalized recommendations that address both mental and spiritual well-being. Your focus is on structured, compassionate, and practical guidance that can help the individual make meaningful improvements in their life. Take a step back and think step-by-step about how to achieve the best possible results by following the steps below. # STEPS - Carefully review the psychological-profile and/or psychology data file provided as input. - Analyze the data to identify key issues, strengths, and areas needing improvement related to the subject's mental and spiritual well-being. - Develop a comprehensive plan that includes specific strategies for spiritual healing, mental health improvement, and overall life enhancement. - Structure your output to clearly outline recommendations, resources, and actionable steps tailored to the individual's unique profile. # OUTPUT INSTRUCTIONS - Only output Markdown. - Ensure your output is organized, clear, and easy to follow, using headings, subheadings, and bullet points where appropriate. - Ensure you follow ALL these instructions when creating your output. # INPUT INPUT:"
    }
  ]
}
web/pnpm-lock.yaml (generated)

@@ -941,8 +941,8 @@ packages:
    resolution: {integrity: sha512-3UDv+G9CsCKO1WKMGw9fwq/SWJYbI0c5Y7LU1AXYoDdbhE2AHQ6N6Nb34sG8Fj7T5APy8qXDCKuuIHd1BR0tVA==}
    engines: {node: '>=8'}

  devalue@5.1.1:
    resolution: {integrity: sha512-maua5KUiapvEwiEAe+XnlZ3Rh0GD+qI1J/nb9vrJc3muPXvcF/8gXYTWF76+5DAqHyDUtOIImEuo0YKE9mshVw==}
  devalue@5.3.2:
    resolution: {integrity: sha512-UDsjUbpQn9kvm68slnrs+mfxwFkIflOhkanmyabZ8zOYk8SMEIbJ3TK+88g70hSIeytu4y18f0z/hYHMTrXIWw==}

  devlop@1.1.0:
    resolution: {integrity: sha512-RWmIqhcFf1lRYBvNmr7qTNuyCt/7/ns2jbpp1+PalgE/rDQcBT0fioSMUpJ93irlUhC5hrg4cYqe6U+0ImW0rA==}

@@ -2704,7 +2704,7 @@ snapshots:
      '@types/cookie': 0.6.0
      acorn: 8.14.1
      cookie: 1.0.2
      devalue: 5.1.1
      devalue: 5.3.2
      esm-env: 1.2.2
      kleur: 4.1.5
      magic-string: 0.30.17

@@ -3060,7 +3060,7 @@ snapshots:
  detect-libc@2.0.4:
    optional: true

  devalue@5.1.1: {}
  devalue@5.3.2: {}

  devlop@1.1.0:
    dependencies:
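A generated lockfile like this is never edited by hand; the hunk above is what a dependency bump leaves behind. A hedged sketch of regenerating it (run from `web/`; exact flags depend on the pnpm version and on whether devalue is a direct or transitive dependency):

```bash
cd web
# Bump devalue and rewrite pnpm-lock.yaml accordingly
pnpm update devalue --latest
```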
@@ -1871,6 +1871,22 @@

        "DEVELOPMENT",
        "AI"
      ]
    },
    {
      "patternName": "create_story_about_person",
      "description": "Infer everyday challenges and realistic coping strategies from a psychological profile and craft an empathetic 500–700-word story consistent with the character.",
      "tags": [
        "WRITING",
        "SELF"
      ]
    },
    {
      "patternName": "heal_person",
      "description": "Analyze a psychological profile, pinpoint issues and strengths, and deliver compassionate, structured strategies for spiritual, mental, and life improvement.",
      "tags": [
        "ANALYSIS",
        "SELF"
      ]
    }
  ]
}