Compare commits

...

95 Commits

Author SHA1 Message Date
github-actions[bot]
294a4635de chore(release): Update version to v1.4.354 2025-12-19 18:47:36 +00:00
Kayvan Sylvan
a70431eaa5 Merge pull request #1889 from ksylvan/kayvan/add-youtube-trabscription-to-swagger
docs: Add a YouTube transcript endpoint to the Swagger UI.
2025-12-19 10:44:47 -08:00
Changelog Bot
ac57c3d2b0 chore: incoming 1889 changelog entry 2025-12-19 10:42:38 -08:00
Kayvan Sylvan
5e4e4f4bf1 docs: Add YouTube transcript endpoint to Swagger UI.
- Add `/youtube/transcript` POST endpoint to Swagger docs
- Define `YouTubeRequest` schema with URL, language, timestamps fields
- Define `YouTubeResponse` schema with transcript and metadata fields
- Add API security requirement using ApiKeyAuth
- Document 200, 400, and 500 response codes
- Add godoc comments to YouTubeHandler struct methods
- Include example values for all request/response properties
2025-12-19 10:41:55 -08:00
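For readers unfamiliar with swaggo, the kind of annotation this commit describes typically looks like the sketch below. The handler shape, package name, and exact struct fields are assumptions for illustration; only the schema names, the `/youtube/transcript` route, and `ApiKeyAuth` come from the commit message.

```go
package restapi

import "github.com/gin-gonic/gin"

// YouTubeRequest mirrors the request schema listed in the commit (fields assumed).
type YouTubeRequest struct {
	URL        string `json:"url" example:"https://www.youtube.com/watch?v=abc123"`
	Language   string `json:"language" example:"en"`
	Timestamps bool   `json:"timestamps" example:"false"`
}

// YouTubeResponse mirrors the response schema listed in the commit (fields assumed).
type YouTubeResponse struct {
	Transcript string `json:"transcript"`
	Title      string `json:"title"`
}

// YouTubeHandler groups the YouTube-related routes (assumed shape).
type YouTubeHandler struct{}

// Transcript godoc
// @Summary  Extract a YouTube transcript
// @Accept   json
// @Produce  json
// @Param    request body YouTubeRequest true "YouTube transcript request"
// @Success  200 {object} YouTubeResponse
// @Failure  400 {string} string "bad request"
// @Failure  500 {string} string "internal error"
// @Security ApiKeyAuth
// @Router   /youtube/transcript [post]
func (h *YouTubeHandler) Transcript(c *gin.Context) {
	// Handler body elided; the commit only concerns the annotations above.
}
```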
github-actions[bot]
96225d4aea chore(release): Update version to v1.4.353 2025-12-19 16:21:50 +00:00
Kayvan Sylvan
adcdc0cf0b Merge pull request #1887 from bvandevliet/feat/yt-title-and-description
feat: correct video title and added description to yt transcript api response
2025-12-19 08:19:15 -08:00
Changelog Bot
e3f9b12fde chore: incoming 1887 changelog entry 2025-12-19 08:16:18 -08:00
Bob Vandevliet
7fa4c0a030 Updated API documentation. 2025-12-19 13:23:44 +01:00
Bob Vandevliet
8a3fa9337c feat: correct video title (instead of id) and added description to yt transcript api response 2025-12-19 13:14:12 +01:00
github-actions[bot]
26ac5f3bf9 chore(release): Update version to v1.4.352 2025-12-18 23:45:28 +00:00
Kayvan Sylvan
b4226da967 Merge pull request #1886 from ksylvan/kayvan/better-new-user-setup-experience
Enhanced Onboarding and Setup Experience
2025-12-18 15:42:59 -08:00
Changelog Bot
b2d24aa5c7 chore: incoming 1886 changelog entry 2025-12-18 15:03:22 -08:00
Kayvan Sylvan
9f79877524 User Experience: implement automated first-time setup and improved configuration validation
### CHANGES

- Add automated first-time setup for patterns and strategies.
- Implement configuration validation to warn about missing required components.
- Update setup menu to group plugins into required and optional.
- Provide helpful guidance when no patterns are found in listing.
- Expand localization support for setup and error messaging across languages.
- Enhance strategy manager to reload and count installed strategies.
- Improve pattern error handling with specific guidance for empty directories.
2025-12-18 14:48:50 -08:00
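A minimal sketch of the kind of validation this commit describes, warning when the patterns directory is missing or empty instead of failing silently. The function name, message text, and package are hypothetical, not Fabric's actual implementation.

```go
package cli

import (
	"fmt"
	"os"
)

// warnIfPatternsMissing is a hypothetical illustration: if the patterns
// directory is absent or empty, point the user at setup rather than
// letting a later listing come back empty with no explanation.
func warnIfPatternsMissing(patternsDir string) {
	entries, err := os.ReadDir(patternsDir)
	if err != nil || len(entries) == 0 {
		fmt.Fprintln(os.Stderr, "No patterns found. Run `fabric --setup` to download the default patterns.")
	}
}
```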
Kayvan Sylvan
829c182a9d chore: update README with new interactive Swagger available in v.1.4.350 2025-12-18 10:47:10 -08:00
github-actions[bot]
8475051a7c chore(release): Update version to v1.4.351 2025-12-18 18:37:22 +00:00
Kayvan Sylvan
9f3122ba35 Merge pull request #1882 from bvandevliet/fix/include-yt-dlp-in-docker-image
Added yt-dlp package to docker image.
2025-12-18 10:34:54 -08:00
Changelog Bot
f61db2cdce chore: incoming 1882 changelog entry 2025-12-18 10:30:23 -08:00
github-actions[bot]
8a2d5f82f1 chore(release): Update version to v1.4.350 2025-12-18 18:29:51 +00:00
Kayvan Sylvan
edaf1a0110 Merge pull request #1884 from ksylvan/kayvan/add-swagger-ui-to-server
Implement interactive Swagger API documentation and automated OpenAPI specification generation.
2025-12-18 10:27:06 -08:00
Changelog Bot
3a4468b970 chore: incoming 1884 changelog entry 2025-12-18 10:05:33 -08:00
Kayvan Sylvan
645190be3a feat: update REST API docs with new fields and examples
### CHANGES

- Add detailed prompt fields table with defaults
- Introduce chat options table with new parameters
- Include complete workflow examples for YouTube summary
- Provide alternative script and CLI comparison for flexibility
2025-12-18 07:30:34 -08:00
Kayvan Sylvan
c06c94f8b8 # CHANGES
- Add Swagger UI at `/swagger/index.html` endpoint
- Generate OpenAPI spec files (JSON and YAML)
- Document chat, patterns, and models endpoints
- Update contributing guide with Swagger annotation instructions
- Add swaggo dependencies to project
- Configure authentication bypass for Swagger documentation
- Add custom YAML handler for OpenAPI spec
- Update REST API documentation with Swagger links
- Add dictionary entries for new tools
2025-12-18 07:12:08 -08:00
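The swaggo/gin-swagger wiring behind a `/swagger/index.html` endpoint usually amounts to a one-line route registration, sketched below. The function name and router setup are assumptions; in practice the generated `docs` package is also blank-imported so the spec is registered (import path omitted here because it depends on the module layout).

```go
package restapi

import (
	"github.com/gin-gonic/gin"
	swaggerFiles "github.com/swaggo/files"
	ginSwagger "github.com/swaggo/gin-swagger"
)

// registerSwagger mounts the generated OpenAPI docs at /swagger/index.html.
// Minimal sketch of the gin-swagger wiring this commit describes.
func registerSwagger(r *gin.Engine) {
	r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
}
```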
Bob Vandevliet
d84bd6f989 Added yt-dlp package to docker image. 2025-12-18 11:16:39 +01:00
Kayvan Sylvan
7ab5e8956c Merge pull request #1880 from ksylvan/kayvan/rest-api-docs 2025-12-17 19:29:28 -08:00
Kayvan Sylvan
99b8b6a972 - Add README table-of-contents link for REST API.
- Document REST API server startup and capabilities.
- Add endpoint overview for chat, patterns, contexts.
- Describe sessions management and model listing endpoints.
- Provide curl examples for key API workflows.
- Explain Ollama compatibility mode endpoints and port.
2025-12-17 19:11:57 -08:00
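As a companion to the curl examples this commit adds, a tiny Go client against a locally running `fabric --serve` instance might look like the sketch below. The port matches the Docker example in the README; the route path is an assumption, so consult the REST API documentation for the real endpoints.

```go
package main

import (
	"fmt"
	"io"
	"net/http"
)

// Query a locally running Fabric REST API server and print the raw response.
func main() {
	resp, err := http.Get("http://localhost:8080/patterns/names") // route path assumed
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(string(body))
}
```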
github-actions[bot]
833b09081e chore(release): Update version to v1.4.349 2025-12-16 08:12:11 +00:00
Kayvan Sylvan
201d1fb791 Merge pull request #1877 from ksylvan/kayvan/modernize-part4-string-and-slice-syntax
modernize: update GitHub Actions and modernize Go code
2025-12-16 00:09:43 -08:00
Changelog Bot
6ecbd044e6 chore: incoming 1877 changelog entry 2025-12-16 00:06:39 -08:00
Kayvan Sylvan
fdadeae1e7 modernize: update GitHub Actions and modernize Go code with latest stdlib features
## CHANGES

- Upgrade GitHub Actions to latest versions (v6, v21)
- Add modernization check step in CI workflow
- Replace strings manipulation with `strings.CutPrefix` and `strings.CutSuffix`
- Replace manual loops with `slices.Contains` for validation
- Use `strings.SplitSeq` for iterator-based string splitting
- Replace `bytes.TrimPrefix` with `bytes.CutPrefix` for clarity
- Use `strings.Builder` instead of string concatenation
- Replace `fmt.Sprintf` with `fmt.Appendf` for efficiency
- Simplify padding calculation with `max` builtin
2025-12-15 23:55:37 -08:00
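For context, the stdlib helpers named above look like this in isolation. These snippets are illustrative, not lines from the Fabric codebase; they only demonstrate the before/after idioms the commit lists.

```go
package main

import (
	"fmt"
	"slices"
	"strings"
)

func main() {
	// strings.CutPrefix replaces a HasPrefix + TrimPrefix pair.
	if rest, ok := strings.CutPrefix("vendor/openai", "vendor/"); ok {
		fmt.Println(rest) // openai
	}

	// slices.Contains replaces a hand-written membership loop.
	valid := []string{"json", "yaml"}
	fmt.Println(slices.Contains(valid, "yaml")) // true

	// strings.SplitSeq yields an iterator instead of allocating a slice.
	for line := range strings.SplitSeq("a\nb\nc", "\n") {
		fmt.Print(line)
	}
	fmt.Println()

	// fmt.Appendf formats directly into a byte slice; max is a builtin since Go 1.21.
	buf := fmt.Appendf(nil, "padding=%d", max(0, 8-5))
	fmt.Println(string(buf)) // padding=3
}
```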
github-actions[bot]
57c3e36574 chore(release): Update version to v1.4.348 2025-12-16 07:34:45 +00:00
Kayvan Sylvan
1b98a8899f Merge pull request #1876 from ksylvan/kayvan/modernize-part3-typefor-and-range-loops
modernize Go code with TypeFor and range loops
2025-12-15 23:31:44 -08:00
Kayvan Sylvan
a4484d4e01 refactor: modernize Go code with TypeFor and range loops
- Replace reflect.TypeOf with TypeFor generic syntax
- Convert traditional for loops to range-based iterations
- Simplify reflection usage in CLI flag handling
- Update test loops to use range over integers
- Refactor string processing loops in template plugin
2025-12-15 23:29:41 -08:00
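The two modernizations named in this commit are sketched below with illustrative types and counts (not taken from the Fabric codebase): `reflect.TypeFor[T]()` in place of `reflect.TypeOf`, and ranging over an integer in place of a classic counted loop.

```go
package main

import (
	"fmt"
	"reflect"
)

func main() {
	// reflect.TypeFor[T]() (Go 1.22) replaces reflect.TypeOf((*T)(nil)).Elem().
	t := reflect.TypeFor[map[string]int]()
	fmt.Println(t) // map[string]int

	// Range over an integer (Go 1.22) replaces `for i := 0; i < n; i++`.
	for i := range 3 {
		fmt.Print(i, " ") // 0 1 2
	}
	fmt.Println()
}
```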
github-actions[bot]
005d43674f chore(release): Update version to v1.4.347 2025-12-16 06:51:40 +00:00
Kayvan Sylvan
3a69437790 Merge pull request #1875 from ksylvan/kayvan/modernize-part2-loops
modernize: update benchmarks to use b.Loop and refactor map copying
2025-12-15 22:48:59 -08:00
Changelog Bot
b057f52ca6 chore: incoming 1875 changelog entry 2025-12-15 22:46:45 -08:00
Kayvan Sylvan
dccdfbac8c test: update benchmarks to use b.Loop and refactor map copying
# CHANGES

- update benchmark loops to use cleaner `b.Loop()` syntax
- remove unnecessary `b.ResetTimer()` call in token benchmark
- use `maps.Copy` for merging variables in patterns handler
2025-12-15 22:40:55 -08:00
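A small benchmark sketch showing both changes described above: the Go 1.24 `b.Loop()` idiom, which manages the timer itself and removes the need for `b.ResetTimer()`, and `maps.Copy` for merging variable maps. The merged data is illustrative, not Fabric's patterns handler code.

```go
package example

import (
	"maps"
	"testing"
)

// BenchmarkMerge demonstrates b.Loop() and maps.Copy together.
func BenchmarkMerge(b *testing.B) {
	defaults := map[string]string{"lang": "en"}
	overrides := map[string]string{"lang": "pt-BR", "tone": "formal"}

	for b.Loop() { // replaces `for i := 0; i < b.N; i++`
		merged := make(map[string]string, len(defaults)+len(overrides))
		maps.Copy(merged, defaults)
		maps.Copy(merged, overrides) // later copies win on key collisions
		_ = merged
	}
}
```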
github-actions[bot]
98038707f1 chore(release): Update version to v1.4.346 2025-12-16 06:30:55 +00:00
Kayvan Sylvan
03b22a70f0 Merge pull request #1874 from ksylvan/kayvan/modernize-part1
refactor: replace interface{} with any across codebase
2025-12-15 22:28:15 -08:00
Kayvan Sylvan
66025d516c refactor: replace interface{} with any across codebase
- Part 1 of incorporating `modernize` tool into Fabric.
- Replace `interface{}` with `any` in slice type declarations
- Update map types from `map[string]interface{}` to `map[string]any`
- Change variadic function parameters to use `...any` instead of `...interface{}`
- Modernize JSON unmarshaling variables to `any` for consistency
- Update struct fields and method signatures to prefer `any` alias
- Ensure all type assertions and conversions use `any` throughout codebase
- Add PR guidelines in docs to encourage focused, reviewable changes
2025-12-15 22:25:18 -08:00
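Since `any` is simply an alias for `interface{}` (Go 1.18+), each rewrite in this commit is cosmetic and changes no behavior. The pairs below are illustrative, not lines from the Fabric codebase; each pair declares identical types.

```go
package example

// Before/after pairs for the three shapes the commit mentions.
var (
	oldSlice []interface{}          // before
	newSlice []any                  // after
	oldMap   map[string]interface{} // before
	newMap   map[string]any         // after
)

func logfOld(format string, args ...interface{}) {} // before
func logfNew(format string, args ...any)         {} // after
```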
github-actions[bot]
32ef2b73c4 chore(release): Update version to v1.4.345 2025-12-15 06:03:18 +00:00
Kayvan Sylvan
656ca7ee28 Merge pull request #1870 from ksylvan/kayvan/update-web-ui-pdfjs-library
Web UI: upgrade pdfjs and add SSR-safe dynamic PDF worker init
2025-12-14 22:00:41 -08:00
Changelog Bot
0025466e4e chore: incoming 1870 changelog entry 2025-12-14 21:57:06 -08:00
Kayvan Sylvan
4c2b38ca53 feat: upgrade pdfjs and add SSR-safe dynamic PDF worker init
- Upgrade `pdfjs-dist` to v5 with new engine requirement
- Dynamically import PDF.js to avoid SSR import-time crashes
- Configure PDF worker via CDN using runtime PDF.js version
- Update PDF conversion pipeline to use lazy initialization
- Guard chat message localStorage persistence behind browser checks
- Reformat ChatService with consistent imports and typings
- Bump `patch-package` and refresh pnpm lock dependency graph
- Add `skeletonlabs` to VSCode spellcheck dictionary
2025-12-14 16:12:23 -08:00
github-actions[bot]
9c7ce4a974 chore(release): Update version to v1.4.344 2025-12-14 08:14:21 +00:00
Kayvan Sylvan
626c492c63 Merge pull request #1867 from jaredmontoya/update-flake
chore: update flake
2025-12-14 00:11:45 -08:00
Changelog Bot
71fb3fea7e chore: incoming 1867 changelog entry 2025-12-14 00:08:33 -08:00
Kayvan Sylvan
3bc1150da4 Merge branch 'main' into update-flake 2025-12-14 00:07:51 -08:00
github-actions[bot]
827e0aeca7 chore(release): Update version to v1.4.343 2025-12-14 08:05:48 +00:00
Kayvan Sylvan
0a1e01c4ab Merge pull request #1829 from danielmiessler/dependabot/npm_and_yarn/web/npm_and_yarn-3c67cbb9cd
chore(deps): bump js-yaml from 4.1.0 to 4.1.1 in /web in the npm_and_yarn group across 1 directory
2025-12-14 00:03:09 -08:00
Changelog Bot
6003bb2c86 chore: incoming 1829 changelog entry 2025-12-13 23:52:18 -08:00
dependabot[bot]
bb896b1064 chore(deps): bump js-yaml
Bumps the npm_and_yarn group with 1 update in the /web directory: [js-yaml](https://github.com/nodeca/js-yaml).


Updates `js-yaml` from 4.1.0 to 4.1.1
- [Changelog](https://github.com/nodeca/js-yaml/blob/master/CHANGELOG.md)
- [Commits](https://github.com/nodeca/js-yaml/compare/4.1.0...4.1.1)

---
updated-dependencies:
- dependency-name: js-yaml
  dependency-version: 4.1.1
  dependency-type: indirect
  dependency-group: npm_and_yarn
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-12-13 23:52:18 -08:00
jaredmontoya
d149c62a37 chore: update flake 2025-12-13 20:30:31 +01:00
github-actions[bot]
3d25fbc04c chore(release): Update version to v1.4.342 2025-12-13 08:11:50 +00:00
Kayvan Sylvan
4c822d2c59 Merge pull request #1866 from ksylvan/kayvan/errors-never-to-stdout
fix: write CLI and streaming errors to stderr
2025-12-13 00:09:09 -08:00
Changelog Bot
f1ffd6ee29 chore: incoming 1866 changelog entry 2025-12-13 00:07:08 -08:00
Kayvan Sylvan
deb59bdd21 fix: write CLI and streaming errors to stderr
## CHANGES
- Route CLI execution errors to standard error output
- Print Anthropic stream errors to stderr consistently
- Add os import to support stderr error writes
- Preserve help-output suppression and exit behavior
2025-12-13 00:02:44 -08:00
github-actions[bot]
2a1e8dcf12 chore(release): Update version to v1.4.341 2025-12-11 10:49:47 +00:00
Kayvan Sylvan
b6fd81dd16 Merge pull request #1860 from ksylvan/kayvan/fix-for-setup-reset-required-value-now-does-not-show-validation-error
fix: allow resetting required settings without validation errors
2025-12-11 18:47:16 +08:00
Kayvan Sylvan
5b723c9e92 fix: allow resetting required settings without validation errors
CHANGES
- update `Ask` to detect reset command and bypass validation
- refactor `OnAnswer` to support new `isReset` parameter logic
- invoke `ConfigureCustom` in `Setup` to avoid redundant re-validation
- add unit tests ensuring required fields can be reset
- add incoming 1860 changelog entry
2025-12-11 02:39:35 -08:00
github-actions[bot]
93f8978085 chore(release): Update version to v1.4.340 2025-12-08 00:36:16 +00:00
Kayvan Sylvan
4d91bf837f Merge pull request #1856 from ksylvan/kayvan/claude-haiku-4-5
Add support for new ClaudeHaiku 4.5 models
2025-12-08 08:33:51 +08:00
Changelog Bot
cb29a0d606 chore: incoming 1856 changelog entry 2025-12-08 08:30:17 +08:00
Kayvan Sylvan
b1eb7a82d9 feat: add support for new ClaudeHaiku models in client
### CHANGES

- Add `ModelClaudeHaiku4_5` to supported models
- Add `ModelClaudeHaiku4_5_20251001` to supported models
2025-12-08 08:21:18 +08:00
github-actions[bot]
bc8f5add00 chore(release): Update version to v1.4.339 2025-12-08 00:10:02 +00:00
Kayvan Sylvan
c3f874f985 Merge pull request #1855 from ksylvan/kayvan/ollama_image_handling
feat: add image attachment support for Ollama vision models
2025-12-08 08:07:33 +08:00
Changelog Bot
922df52d0c chore: incoming 1855 changelog entry 2025-12-08 08:00:59 +08:00
Kayvan Sylvan
4badfecadb feat: add multi-modal image support to Ollama client
## CHANGES

- Add base64 and io imports for image handling
- Store httpClient separately in Client struct for reuse
- Convert createChatRequest to return error for validation
- Implement convertMessage to handle multi-content chat messages
- Add loadImageBytes to fetch images from URLs
- Support base64 data URLs for inline images
- Handle HTTP image URLs with context propagation
- Replace debug print with proper debuglog usage
2025-12-08 07:48:36 +08:00
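A simplified sketch of the image-loading behavior described above: inline base64 data URLs are decoded directly, while http(s) URLs are fetched with the caller's context. The function name follows the commit's description, but the body is an illustration under those assumptions, not Fabric's implementation.

```go
package example

import (
	"context"
	"encoding/base64"
	"fmt"
	"io"
	"net/http"
	"strings"
)

// loadImageBytes returns raw image bytes for either a data URL or an HTTP URL.
func loadImageBytes(ctx context.Context, url string) ([]byte, error) {
	if strings.HasPrefix(url, "data:") {
		// Expect "data:image/png;base64,<payload>".
		_, payload, ok := strings.Cut(url, ",")
		if !ok {
			return nil, fmt.Errorf("malformed data URL")
		}
		return base64.StdEncoding.DecodeString(payload)
	}

	// Fetch remote images with the caller's context so cancellation propagates.
	req, err := http.NewRequestWithContext(ctx, http.MethodGet, url, nil)
	if err != nil {
		return nil, err
	}
	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		return nil, err
	}
	defer resp.Body.Close()
	return io.ReadAll(resp.Body)
}
```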
github-actions[bot]
83139a64d5 chore(release): Update version to v1.4.338 2025-12-04 13:34:00 +00:00
Kayvan Sylvan
78fd836532 Merge pull request #1852 from ksylvan/kayvan/add-abacus-provider-for-chatllm-models
Add Abacus vendor for ChatLLM models with static model list
2025-12-04 21:31:34 +08:00
Kayvan Sylvan
894459ddec feat: add static model support and register Abacus provider
CHANGES

- feat: detect modelsURL starting with 'static:' and route
- feat: implement getStaticModels returning curated Abacus model list
- feat: register Abacus provider with ModelsURL 'static:abacus'
- chore: add fmt import for error formatting in provider code
- test: extend provider tests to include Abacus existence
- chore: update .vscode settings add 'kimi' and 'qwen' contributors
2025-12-04 21:22:57 +08:00
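The "static:" routing this commit describes can be sketched as below. Function names echo the commit wording, but the bodies and the placeholder model entry are illustrative assumptions, not the real curated Abacus list.

```go
package example

import "strings"

// getModels routes a ModelsURL of the form "static:<name>" to a curated
// list instead of an HTTP fetch.
func getModels(modelsURL string) []string {
	if name, ok := strings.CutPrefix(modelsURL, "static:"); ok {
		return getStaticModels(name)
	}
	return fetchModelsOverHTTP(modelsURL)
}

func getStaticModels(provider string) []string {
	if provider == "abacus" {
		return []string{"route-llm"} // placeholder entry, not the actual curated list
	}
	return nil
}

func fetchModelsOverHTTP(url string) []string { return nil } // stub for the sketch
```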
github-actions[bot]
920c22c889 chore(release): Update version to v1.4.337 2025-12-04 04:21:35 +00:00
Kayvan Sylvan
a0f931feb0 Merge pull request #1851 from ksylvan/kayvan/add-z-ai-vendor-support
Add Z AI provider and glm model support
2025-12-04 12:19:13 +08:00
Kayvan Sylvan
4b080fd6dd feat: add Z AI provider and glm model support
- Add Z AI provider configuration to ProviderMap
- Include BaseURL for Z AI API endpoint
- Add test case for Z AI provider existence
- Add glm to OpenAI model prefixes list
- Reorder gpt-5 in model prefixes list
- Support new Z AI provider in OpenAI compatible plugins
2025-12-04 12:06:55 +08:00
github-actions[bot]
298abecb3f chore(release): Update version to v1.4.336 2025-12-01 11:37:19 +00:00
Kayvan Sylvan
e2d4aab775 Merge pull request #1848 from zeddy303/fix/localStorage-ssr-issue 2025-12-01 19:34:45 +08:00
Changelog Bot
17cac13584 chore: incoming 1848 changelog entry 2025-12-01 18:41:32 +08:00
zeddy303
e4a004cf88 Fix localStorage SSR error in favorites-store
Use SvelteKit's browser constant instead of typeof localStorage check
to properly handle server-side rendering. Prevents 'localStorage.getItem
is not a function' error when running dev server.
2025-11-29 13:06:54 -07:00
github-actions[bot]
fcb10feadd chore(release): Update version to v1.4.335 2025-11-28 02:17:17 +00:00
Kayvan Sylvan
9560537730 Merge pull request #1847 from ksylvan/kayvan/fix-ollama-model-raw-mode
Improve model name matching for NeedsRaw in Ollama plugin
2025-11-27 18:14:47 -08:00
Kayvan Sylvan
42fabab352 feat: improve model name matching in Ollama plugin
- Add "conceptmap" to VSCode dictionary settings
- Rename `ollamaPrefixes` variable to `ollamaSearchStrings`
- Replace `HasPrefix` with `Contains` for model matching
- Enable substring matching for Ollama model names
- chore: incoming 1847 changelog entry
2025-11-28 10:00:08 +08:00
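The effect of switching from prefix matching to substring matching is small but visible in a sketch: a name like "custom-llama3:8b" now matches even though it does not start with "llama". The variable name follows the commit; the entries and function body are illustrative, not Fabric's actual NeedsRaw logic.

```go
package example

import "strings"

// ollamaSearchStrings mirrors the renamed variable; entries are illustrative.
var ollamaSearchStrings = []string{"llama", "mistral"}

// needsRaw reports whether a model name matches any search string anywhere,
// not just at the start.
func needsRaw(model string) bool {
	for _, s := range ollamaSearchStrings {
		if strings.Contains(model, s) {
			return true
		}
	}
	return false
}
```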
Kayvan Sylvan
895ca1ad99 Merge branch 'danielmiessler:main' into main 2025-11-26 05:52:48 -08:00
Kayvan Sylvan
2ef7db8bb2 docs: Fix typo in README 2025-11-26 21:51:57 +08:00
github-actions[bot]
8491354a30 chore(release): Update version to v1.4.334 2025-11-26 13:40:22 +00:00
Kayvan Sylvan
1fd5b0d27b Merge pull request #1845 from ksylvan/kayvan/add-claude-opus-4-5-support
Add Claude Opus 4.5 Support
2025-11-26 05:38:02 -08:00
Kayvan Sylvan
7eb67ee82d chore: update Go dependencies and add new Claude Opus 4.5 model support
- Upgrade anthropic-sdk-go from v1.16.0 to v1.19.0
- Bump golang.org/x/text from v0.28.0 to v0.31.0
- Update golang.org/x/crypto from v0.41.0 to v0.45.0
- Upgrade golang.org/x/net from v0.43.0 to v0.47.0
- Bump golang.org/x/sync from v0.16.0 to v0.18.0
- Update golang.org/x/sys from v0.35.0 to v0.38.0
- Add Claude Opus 4.5 model variants to Anthropic client
- chore: incoming 1845 changelog entry
2025-11-26 21:34:54 +08:00
github-actions[bot]
e3df1e1c0a chore(release): Update version to v1.4.333 2025-11-25 22:49:42 +00:00
Kayvan Sylvan
6e939cfff4 Merge pull request #1844 from ksylvan/kayvan/concall-summary-pattern-followup
Correct directory name from `concall_summery` to `concall_summary`
2025-11-25 14:47:21 -08:00
Changelog Bot
9e2a35e150 chore: incoming 1844 changelog entry 2025-11-26 06:43:18 +08:00
Kayvan Sylvan
a3a1e616e7 fix: correct directory name from concall_summery to concall_summary
- Rename pattern directory to fix spelling error
- Add new pattern to explanations documentation
- Update suggest_pattern system with concall_summary references
- Include concall_summary in ANALYSIS category mappings
- Add concall_summary to BUSINESS category listings
- Append concall_summary to SUMMARIZE category references
- Update pattern descriptions JSON with new entry
- Generate pattern extracts for concall_summary functionality
- Add user documentation for earnings call analysis
- Include changelog entry for PR #1833
2025-11-26 06:31:32 +08:00
Kayvan Sylvan
98eddaf5e8 Merge pull request #1833 from junaid18183/main
Added concall_summery
2025-11-25 03:30:24 -08:00
github-actions[bot]
0ae20a8ccd chore(release): Update version to v1.4.332 2025-11-24 14:13:17 +00:00
Kayvan Sylvan
0fbc86be17 Merge pull request #1843 from ksylvan/kayvan/fix-vendor-listing-and-case-sensitivity
Implement case-insensitive vendor and model name matching
2025-11-24 06:10:45 -08:00
Changelog Bot
5b1a4ab306 chore: incoming 1843 changelog entry 2025-11-24 21:48:53 +08:00
Kayvan Sylvan
817e75853e fix: implement case-insensitive vendor and model name matching across the application
## CHANGES

- Add case-insensitive vendor lookup in VendorsManager
- Implement model name normalization in GetChatter method
- Add FilterByVendor method with case-insensitive matching
- Add FindModelNameCaseInsensitive helper for model queries
- Update group/item comparison to use case-insensitive checks
- Store vendors with lowercase keys internally
- Add comprehensive tests for case-insensitive functionality
- Fix vendor filtering for model listing command
2025-11-24 21:36:17 +08:00
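A minimal sketch of the case-insensitive lookup pattern this commit describes, assuming (as the bullets state) that vendors are stored under lowercase keys and model names are compared case-insensitively. Types and the value type are simplified for illustration.

```go
package example

import "strings"

// VendorsManager stores vendors under lowercase keys internally (simplified).
type VendorsManager struct {
	vendors map[string]string // lowercase name -> canonical name
}

// Lookup normalizes the requested name before the map lookup.
func (m *VendorsManager) Lookup(name string) (string, bool) {
	v, ok := m.vendors[strings.ToLower(name)]
	return v, ok
}

// findModelNameCaseInsensitive returns the canonical spelling of a model
// name, matching with strings.EqualFold.
func findModelNameCaseInsensitive(models []string, want string) (string, bool) {
	for _, m := range models {
		if strings.EqualFold(m, want) {
			return m, true
		}
	}
	return "", false
}
```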
Juned Memon
15c8a84b25 Added concall_summery 2025-11-17 15:53:25 +05:30
93 changed files with 5052 additions and 1545 deletions

View File

@@ -20,18 +20,22 @@ jobs:
       contents: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@v5
-      - name: Install Nix
-        uses: DeterminateSystems/nix-installer-action@main
+        uses: actions/checkout@v6
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: ./go.mod
       - name: Run tests
         run: go test -v ./...
+      - name: Check for modernization opportunities
+        run: |
+          go run golang.org/x/tools/go/analysis/passes/modernize/cmd/modernize@latest ./...
+      - name: Install Nix
+        uses: DeterminateSystems/nix-installer-action@v21
       - name: Check Formatting
         run: nix flake check

View File

@@ -11,7 +11,7 @@ jobs:
     runs-on: ubuntu-latest
     steps:
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
@@ -32,7 +32,7 @@ jobs:
       - name: Upload Patterns Artifact
         if: steps.check-changes.outputs.changes == 'true'
-        uses: actions/upload-artifact@v4
+        uses: actions/upload-artifact@v6
         with:
           name: patterns
           path: patterns.zip

View File

@@ -15,12 +15,12 @@ jobs:
       contents: read
     steps:
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: ./go.mod
@@ -37,11 +37,11 @@ jobs:
       contents: write
     steps:
       - name: Checkout code
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
       - name: Set up Go
-        uses: actions/setup-go@v5
+        uses: actions/setup-go@v6
         with:
           go-version-file: ./go.mod
       - name: Run GoReleaser

View File

@@ -24,17 +24,17 @@ concurrency:
 jobs:
   update-version:
     if: >
-      ${{ github.repository_owner == 'danielmiessler' }} &&
+      github.repository_owner == 'danielmiessler' &&
       github.event_name == 'push' && github.ref == 'refs/heads/main'
     runs-on: ubuntu-latest
     steps:
       - name: Checkout repository
-        uses: actions/checkout@v5
+        uses: actions/checkout@v6
         with:
           fetch-depth: 0
       - name: Install Nix
-        uses: DeterminateSystems/nix-installer-action@main
+        uses: DeterminateSystems/nix-installer-action@v21
       - name: Set up Git
         run: |

View File

@@ -7,7 +7,9 @@
     "Anki",
     "anthropics",
     "Aoede",
+    "apikey",
+    "aplicar",
     "Astley",
     "atotto",
     "Autonoe",
     "azureml",
@@ -24,6 +26,7 @@
     "compadd",
     "compdef",
     "compinit",
+    "conceptmap",
     "creatordate",
     "curcontext",
     "custompatterns",
@@ -62,6 +65,7 @@
     "gjson",
     "GOARCH",
     "GODEBUG",
+    "godoc",
     "godotenv",
     "GOEXPERIMENT",
     "gofmt",
@@ -95,6 +99,7 @@
     "joho",
     "kballard",
     "Keploy",
+    "kimi",
     "Kore",
     "ksylvan",
     "Langdock",
@@ -150,6 +155,7 @@
     "Pulcherrima",
     "pycache",
     "pyperclip",
+    "qwen",
     "readystream",
     "restapi",
     "rmextension",
@@ -163,12 +169,14 @@
     "sess",
     "sgaunet",
     "shellquote",
+    "skeletonlabs",
     "SSEHTTP",
     "storer",
     "Streamlit",
     "stretchr",
     "subchunk",
     "Sulafat",
+    "swaggo",
     "synctest",
     "talkpanel",
     "Telos",
@@ -184,6 +192,7 @@
     "updatepatterns",
     "useb",
     "USERPROFILE",
+    "varnames",
     "videoid",
     "webp",
     "WEBVTT",

View File

@@ -1,5 +1,235 @@
# Changelog
## v1.4.354 (2025-12-19)
### PR [#1889](https://github.com/danielmiessler/Fabric/pull/1889) by [ksylvan](https://github.com/ksylvan): docs: Add a YouTube transcript endpoint to the Swagger UI
- Add `/youtube/transcript` POST endpoint to Swagger docs
- Define `YouTubeRequest` schema with URL, language, timestamps fields
- Define `YouTubeResponse` schema with transcript and metadata fields
- Add API security requirement using ApiKeyAuth
- Document 200, 400, and 500 response codes
## v1.4.353 (2025-12-19)
### PR [#1887](https://github.com/danielmiessler/Fabric/pull/1887) by [bvandevliet](https://github.com/bvandevliet): feat: correct video title and added description to yt transcript api response
- Feat: correct video title (instead of id) and added description to yt transcript api response
- Updated API documentation.
## v1.4.352 (2025-12-18)
### PR [#1886](https://github.com/danielmiessler/Fabric/pull/1886) by [ksylvan](https://github.com/ksylvan): Enhanced Onboarding and Setup Experience
- User Experience: implement automated first-time setup and improved configuration validation
- Add automated first-time setup for patterns and strategies
- Implement configuration validation to warn about missing required components
- Update setup menu to group plugins into required and optional
- Provide helpful guidance when no patterns are found in listing
### Direct commits
- Chore: update README with new interactive Swagger available in v.1.4.350
## v1.4.351 (2025-12-18)
### PR [#1882](https://github.com/danielmiessler/Fabric/pull/1882) by [bvandevliet](https://github.com/bvandevliet): Added yt-dlp package to docker image
- Added yt-dlp package to docker image.
## v1.4.350 (2025-12-18)
### PR [#1880](https://github.com/danielmiessler/Fabric/pull/1880) by [ksylvan](https://github.com/ksylvan): docs: add REST API server section and new endpoint reference
- Add README table-of-contents link for REST API
- Document REST API server startup and capabilities
- Add endpoint overview for chat, patterns, contexts
- Describe sessions management and model listing endpoints
- Provide curl examples for key API workflows
### PR [#1884](https://github.com/danielmiessler/Fabric/pull/1884) by [ksylvan](https://github.com/ksylvan): Implement interactive Swagger API documentation and automated OpenAPI specification generation
- Add Swagger UI at `/swagger/index.html` endpoint
- Generate OpenAPI spec files (JSON and YAML)
- Document chat, patterns, and models endpoints
- Update contributing guide with Swagger annotation instructions
- Configure authentication bypass for Swagger documentation
## v1.4.349 (2025-12-16)
### PR [#1877](https://github.com/danielmiessler/Fabric/pull/1877) by [ksylvan](https://github.com/ksylvan): modernize: update GitHub Actions and modernize Go code
- Modernize GitHub Actions and Go code with latest stdlib features
- Upgrade GitHub Actions to latest versions (v6, v21) and add modernization check step
- Replace strings manipulation with `strings.CutPrefix` and `strings.CutSuffix`
- Replace manual loops with `slices.Contains` for validation and use `strings.SplitSeq` for iterator-based splitting
- Replace `fmt.Sprintf` with `fmt.Appendf` for efficiency and simplify padding calculation with `max` builtin
## v1.4.348 (2025-12-16)
### PR [#1876](https://github.com/danielmiessler/Fabric/pull/1876) by [ksylvan](https://github.com/ksylvan): modernize Go code with TypeFor and range loops
- Replace reflect.TypeOf with TypeFor generic syntax for improved type handling
- Convert traditional for loops to range-based iterations for better code readability
- Simplify reflection usage in CLI flag handling to reduce complexity
- Update test loops to use range over integers for cleaner test code
- Refactor string processing loops in template plugin to use modern Go patterns
## v1.4.347 (2025-12-16)
### PR [#1875](https://github.com/danielmiessler/Fabric/pull/1875) by [ksylvan](https://github.com/ksylvan): modernize: update benchmarks to use b.Loop and refactor map copying
- Updated benchmark loops to use cleaner `b.Loop()` syntax
- Removed unnecessary `b.ResetTimer()` call in token benchmark
- Used `maps.Copy` for merging variables in patterns handler
## v1.4.346 (2025-12-16)
### PR [#1874](https://github.com/danielmiessler/Fabric/pull/1874) by [ksylvan](https://github.com/ksylvan): refactor: replace interface{} with any across codebase
- Part 1 of dealing with #1873 as pointed out by @philoserf
- Replace `interface{}` with `any` in slice type declarations throughout the codebase
- Update map types from `map[string]interface{}` to `map[string]any` for modern Go standards
- Change variadic function parameters to use `...any` instead of `...interface{}`
- Modernize JSON unmarshaling variables to use `any` for consistency
- Update struct fields and method signatures to prefer the `any` alias over legacy interface syntax
## v1.4.345 (2025-12-15)
### PR [#1870](https://github.com/danielmiessler/Fabric/pull/1870) by [ksylvan](https://github.com/ksylvan): Web UI: upgrade pdfjs and add SSR-safe dynamic PDF worker init
- Upgrade `pdfjs-dist` to v5 with new engine requirement
- Dynamically import PDF.js to avoid SSR import-time crashes
- Configure PDF worker via CDN using runtime PDF.js version
- Update PDF conversion pipeline to use lazy initialization
- Guard chat message localStorage persistence behind browser checks
## v1.4.344 (2025-12-14)
### PR [#1867](https://github.com/danielmiessler/Fabric/pull/1867) by [jaredmontoya](https://github.com/jaredmontoya): chore: update flake
- Chore: update flake
- Merge branch 'main' into update-flake
## v1.4.343 (2025-12-14)
### PR [#1829](https://github.com/danielmiessler/Fabric/pull/1829) by [dependabot](https://github.com/apps/dependabot): chore(deps): bump js-yaml from 4.1.0 to 4.1.1 in /web in the npm_and_yarn group across 1 directory
- Updated js-yaml dependency from version 4.1.0 to 4.1.1 in the /web directory
## v1.4.342 (2025-12-13)
### PR [#1866](https://github.com/danielmiessler/Fabric/pull/1866) by [ksylvan](https://github.com/ksylvan): fix: write CLI and streaming errors to stderr
- Fix: write CLI and streaming errors to stderr
- Route CLI execution errors to standard error output
- Print Anthropic stream errors to stderr consistently
- Add os import to support stderr error writes
- Preserve help-output suppression and exit behavior
## v1.4.341 (2025-12-10)
### PR [#1860](https://github.com/danielmiessler/Fabric/pull/1860) by [ksylvan](https://github.com/ksylvan): fix: allow resetting required settings without validation errors
- Fix: allow resetting required settings without validation errors
- Update `Ask` to detect reset command and bypass validation
- Refactor `OnAnswer` to support new `isReset` parameter logic
- Invoke `ConfigureCustom` in `Setup` to avoid redundant re-validation
- Add unit tests ensuring required fields can be reset
## v1.4.340 (2025-12-08)
### PR [#1856](https://github.com/danielmiessler/Fabric/pull/1856) by [ksylvan](https://github.com/ksylvan): Add support for new ClaudeHaiku 4.5 models
- Add support for new ClaudeHaiku models in client
- Add `ModelClaudeHaiku4_5` to supported models
- Add `ModelClaudeHaiku4_5_20251001` to supported models
## v1.4.339 (2025-12-08)
### PR [#1855](https://github.com/danielmiessler/Fabric/pull/1855) by [ksylvan](https://github.com/ksylvan): feat: add image attachment support for Ollama vision models
- Add multi-modal image support to Ollama client
- Implement convertMessage to handle multi-content chat messages
- Add loadImageBytes to fetch images from URLs
- Support base64 data URLs for inline images
- Handle HTTP image URLs with context propagation
## v1.4.338 (2025-12-04)
### PR [#1852](https://github.com/danielmiessler/Fabric/pull/1852) by [ksylvan](https://github.com/ksylvan): Add Abacus vendor for ChatLLM models with static model list
- Add static model support and register Abacus provider
- Detect modelsURL starting with 'static:' and route appropriately
- Implement getStaticModels returning curated Abacus model list
- Register Abacus provider with ModelsURL 'static:abacus'
- Extend provider tests to include Abacus existence
## v1.4.337 (2025-12-04)
### PR [#1851](https://github.com/danielmiessler/Fabric/pull/1851) by [ksylvan](https://github.com/ksylvan): Add Z AI provider and glm model support
- Add Z AI provider configuration to ProviderMap
- Include BaseURL for Z AI API endpoint
- Add test case for Z AI provider existence
- Add glm to OpenAI model prefixes list
- Support new Z AI provider in OpenAI compatible plugins
## v1.4.336 (2025-12-01)
### PR [#1848](https://github.com/danielmiessler/Fabric/pull/1848) by [zeddy303](https://github.com/zeddy303): Fix localStorage SSR error in favorites-store
- Fix localStorage SSR error in favorites-store by using SvelteKit's browser constant instead of typeof localStorage check to properly handle server-side rendering and prevent 'localStorage.getItem is not a function' error when running dev server
## v1.4.335 (2025-11-28)
### PR [#1847](https://github.com/danielmiessler/Fabric/pull/1847) by [ksylvan](https://github.com/ksylvan): Improve model name matching for NeedsRaw in Ollama plugin
- Improved model name matching in Ollama plugin by replacing prefix-based matching with substring matching
- Enhanced NeedsRaw functionality to support more flexible model name detection
- Renamed `ollamaPrefixes` variable to `ollamaSearchStrings` for better code clarity
- Replaced `HasPrefix` function with `Contains` for more comprehensive model matching
- Added "conceptmap" to VSCode dictionary settings
### Direct commits
- Merge branch 'danielmiessler:main' into main
- Docs: Fix typo in README
## v1.4.334 (2025-11-26)
### PR [#1845](https://github.com/danielmiessler/Fabric/pull/1845) by [ksylvan](https://github.com/ksylvan): Add Claude Opus 4.5 Support
- Add Claude Opus 4.5 model variants to Anthropic client
- Upgrade anthropic-sdk-go from v1.16.0 to v1.19.0
- Update golang.org/x/crypto from v0.41.0 to v0.45.0
- Upgrade golang.org/x/net from v0.43.0 to v0.47.0
- Bump golang.org/x/text from v0.28.0 to v0.31.0
## v1.4.333 (2025-11-25)
### PR [#1833](https://github.com/danielmiessler/Fabric/pull/1833) by [junaid18183](https://github.com/junaid18183): Added concall_summary
- Added concall_summery pattern to extract strategic insights from earnings transcripts for investors.
### PR [#1844](https://github.com/danielmiessler/Fabric/pull/1844) by [ksylvan](https://github.com/ksylvan): Correct directory name from `concall_summery` to `concall_summary`
- Fix: correct directory name from `concall_summery` to `concall_summary`
- Rename pattern directory to fix spelling error
- Update suggest_pattern system with concall_summary references
- Add concall_summary to BUSINESS and SUMMARIZE category listings
- Add user documentation for earnings call analysis
## v1.4.332 (2025-11-24)
### PR [#1843](https://github.com/danielmiessler/Fabric/pull/1843) by [ksylvan](https://github.com/ksylvan): Implement case-insensitive vendor and model name matching
- Fix: implement case-insensitive vendor and model name matching across the application
- Add case-insensitive vendor lookup in VendorsManager
- Implement model name normalization in GetChatter method
- Add FilterByVendor method with case-insensitive matching
- Add FindModelNameCaseInsensitive helper for model queries
## v1.4.331 (2025-11-22)
### PR [#1839](https://github.com/danielmiessler/Fabric/pull/1839) by [ksylvan](https://github.com/ksylvan): Add GitHub Models Provider and Refactor Fetching Fallback Logic

View File

@@ -38,6 +38,7 @@
 [Philosophy](#philosophy) •
 [Installation](#installation) •
 [Usage](#usage) •
+[REST API](#rest-api-server) •
 [Examples](#examples) •
 [Just Use the Patterns](#just-use-the-patterns) •
 [Custom Patterns](#custom-patterns) •
@@ -73,6 +74,11 @@ Below are the **new features and capabilities** we've added (newest first):
 ### Recent Major Features
+- [v1.4.350](https://github.com/danielmiessler/fabric/releases/tag/v1.4.350) (Dec 18, 2025) — **Interactive API Documentation**: Adds Swagger/OpenAPI UI at `/swagger/index.html` with comprehensive REST API documentation, enhanced developer guides, and improved endpoint discoverability for easier integration.
+- [v1.4.338](https://github.com/danielmiessler/fabric/releases/tag/v1.4.338) (Dec 4, 2025) — Add Abacus vendor support for Chat-LLM
+  models (see [RouteLLM APIs](https://abacus.ai/app/route-llm-apis)).
+- [v1.4.337](https://github.com/danielmiessler/fabric/releases/tag/v1.4.337) (Dec 4, 2025) — Add "Z AI" vendor support. See the [Z AI overview](https://docs.z.ai/guides/overview/overview) page for more details.
+- [v1.4.334](https://github.com/danielmiessler/fabric/releases/tag/v1.4.334) (Nov 26, 2025) — **Claude Opus 4.5**: Updates the Anthropic SDK to the latest and adds the new [Claude Opus 4.5](https://www.anthropic.com/news/claude-opus-4-5) to the available models.
 - [v1.4.331](https://github.com/danielmiessler/fabric/releases/tag/v1.4.331) (Nov 23, 2025) — **Support for GitHub Models**: Adds support for using GitHub Models.
 - [v1.4.322](https://github.com/danielmiessler/fabric/releases/tag/v1.4.322) (Nov 5, 2025) — **Interactive HTML Concept Maps and Claude Sonnet 4.5**: Adds `create_conceptmap` pattern for visual knowledge representation using Vis.js, introduces WELLNESS category with psychological analysis patterns, and upgrades to Claude Sonnet 4.5
 - [v1.4.317](https://github.com/danielmiessler/fabric/releases/tag/v1.4.317) (Sep 21, 2025) — **Portuguese Language Variants**: Adds BCP 47 locale normalization with support for Brazilian Portuguese (pt-BR) and European Portuguese (pt-PT) with intelligent fallback chains
@@ -166,6 +172,7 @@ Keep in mind that many of these were recorded when Fabric was Python-based, so r
 - [Usage](#usage)
   - [Debug Levels](#debug-levels)
   - [Extensions](#extensions)
+  - [REST API Server](#rest-api-server)
 - [Our approach to prompting](#our-approach-to-prompting)
 - [Examples](#examples)
 - [Just use the Patterns](#just-use-the-patterns)
@@ -295,7 +302,7 @@ docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:l
 # Use Fabric with your patterns
 docker run --rm -it -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:latest -p summarize
 
-# Run the REST API server
+# Run the REST API server (see REST API Server section)
 docker run --rm -it -p 8080:8080 -v $HOME/.fabric-config:/root/.config/fabric kayvan/fabric:latest --serve
 ```
@@ -717,6 +724,25 @@ Fabric supports extensions that can be called within patterns. See the [Extensio
 **Important:** Extensions only work within pattern files, not via direct stdin. See the guide for details and examples.
 
+## REST API Server
+
+Fabric includes a built-in REST API server that exposes all core functionality over HTTP. Start the server with:
+
+```bash
+fabric --serve
+```
+
+The server provides endpoints for:
+
+- Chat completions with streaming responses
+- Pattern management (create, read, update, delete)
+- Context and session management
+- Model and vendor listing
+- YouTube transcript extraction
+- Configuration management
+
+For complete endpoint documentation, authentication setup, and usage examples, see [REST API Documentation](docs/rest-api.md).
+
 ## Our approach to prompting
 
 Fabric _Patterns_ are different than most prompts you'll see.

View File

@@ -109,11 +109,11 @@ func ScanDirectory(rootDir string, maxDepth int, instructions string, ignoreList
 	}
 	// Create final data structure
-	var data []interface{}
+	var data []any
 	data = append(data, rootItem)
 	// Add report
-	reportItem := map[string]interface{}{
+	reportItem := map[string]any{
 		"type": "report",
 		"directories": dirCount,
 		"files": fileCount,
@@ -121,7 +121,7 @@ func ScanDirectory(rootDir string, maxDepth int, instructions string, ignoreList
 	data = append(data, reportItem)
 	// Add instructions
-	instructionsItem := map[string]interface{}{
+	instructionsItem := map[string]any{
 		"type": "instructions",
 		"name": "code_change_instructions",
 		"details": instructions,

View File

@@ -12,7 +12,7 @@ import (
 func main() {
 	err := cli.Cli(version)
 	if err != nil && !flags.WroteHelp(err) {
-		fmt.Printf("%s\n", err)
+		fmt.Fprintf(os.Stderr, "%s\n", err)
 		os.Exit(1)
 	}
 }

View File

@@ -1,3 +1,3 @@
 package main
-var version = "v1.4.331"
+var version = "v1.4.354"

Binary file not shown.

View File

@@ -574,8 +574,8 @@ func (g *Generator) extractChanges(pr *github.PR) []string {
 	}
 	if len(changes) == 0 && pr.Body != "" {
-		lines := strings.Split(pr.Body, "\n")
-		for _, line := range lines {
+		lines := strings.SplitSeq(pr.Body, "\n")
+		for line := range lines {
 			line = strings.TrimSpace(line)
 			if strings.HasPrefix(line, "- ") || strings.HasPrefix(line, "* ") {
 				change := strings.TrimPrefix(strings.TrimPrefix(line, "- "), "* ")

View File

@@ -159,7 +159,7 @@ func (g *Generator) CreateNewChangelogEntry(version string) error {
 	for _, file := range files {
 		// Extract PR number from filename (e.g., "1640.txt" -> 1640)
 		filename := filepath.Base(file)
-		if prNumStr := strings.TrimSuffix(filename, ".txt"); prNumStr != filename {
+		if prNumStr, ok := strings.CutSuffix(filename, ".txt"); ok {
 			if prNum, err := strconv.Atoi(prNumStr); err == nil {
 				processedPRs[prNum] = true
 				prNumbers = append(prNumbers, prNum)

View File

@@ -333,7 +333,7 @@ func (c *Client) FetchAllMergedPRsGraphQL(since time.Time) ([]*PR, error) {
 	for {
 		// Prepare variables
-		variables := map[string]interface{}{
+		variables := map[string]any{
 			"owner": graphql.String(c.owner),
 			"repo": graphql.String(c.repo),
 			"after": (*graphql.String)(after),

View File

@@ -0,0 +1,84 @@
# IDENTITY and PURPOSE
You are an equity research analyst specializing in earnings and conference call analysis. Your role involves carefully examining transcripts to extract actionable insights that can inform investment decisions. You need to focus on several key areas, including management commentary, analyst questions, financial and operational insights, risks and red flags, hidden signals, and an executive summary. Your task is to distill complex information into clear, concise bullet points, capturing strategic themes, growth drivers, and potential concerns. It is crucial to interpret the tone, identify contradictions, and highlight any subtle cues that may indicate future strategic shifts or risks.
Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
# STEPS
* Analyze the transcript to extract management commentary, focusing on strategic themes, growth drivers, margin commentary, guidance, tone analysis, and any contradictions or vague areas.
* Extract a summary of the content in exactly **25 words**, including who is presenting and the content being discussed; place this under a **SUMMARY** section.
* For each analyst's question, determine the underlying concern, summarize management's exact answer, evaluate whether the answer addresses the question fully, and identify anything the management avoided or deflected.
* Gather financial and operational insights, including commentary on demand, pricing, capacity, market share, cost inflation, raw material trends, and supply-chain issues.
* Identify risks and red flags by noting any negative commentary, early warning signs, unusual wording, delayed responses, repeated disclaimers, and areas where management seemed less confident.
* Detect hidden signals such as forward-looking hints, unasked but important questions, and subtle cues about strategy shifts or stress.
* Create an executive summary in bullet points, listing the 10 most important takeaways, 3 surprises, and 3 things to track in the next quarter.
# OUTPUT STRUCTURE
* MANAGEMENT COMMENTARY
* Key strategic themes
* Growth drivers discussed
* Margin commentary
* Guidance (explicit + implicit)
* Tone analysis (positive/neutral/negative)
* Any contradictions or vague areas
* ANALYST QUESTIONS (Q&A)
* For each analyst (use bullets, one analyst per bullet-group):
* Underlying concern (what the question REALLY asked)
* Management's exact answer (concise)
* Answer completeness (Yes/No — short explanation)
* Items management avoided or deflected
* FINANCIAL & OPERATIONAL INSIGHTS
* Demand, pricing, capacity, market share commentary
* Cost inflation, raw material trends, supply-chain issues
* Segment-wise performance and commentary (if applicable)
* RISKS & RED FLAGS
* Negative commentary or early-warning signs
* Unusual wording, delayed responses, repeated disclaimers
* Areas where management was less confident
* HIDDEN SIGNALS
* Forward-looking hints and tone shifts
* Important topics not asked by analysts but relevant
* Subtle cues of strategy change, stress, or opportunity
* EXECUTIVE SUMMARY
* 10 most important takeaways (bullet points)
* 3 surprises (bullet points)
* 3 things to track next quarter (bullet points)
* SUMMARY (exactly 25 words)
* A single 25-word sentence summarizing who presented and what was discussed
# OUTPUT INSTRUCTIONS
* Only output Markdown.
* Provide everything in clear, crisp bullet points.
* Use bulleted lists only; do not use numbered lists.
* Begin the output with the **SUMMARY** (exactly 25 words), then the sections in the order shown under **OUTPUT STRUCTURE**.
* For **ANALYST QUESTIONS (Q&A)**, keep each analyst's Q&A grouped and separated by a blank line for readability.
* For **EXECUTIVE SUMMARY**, present the 10 takeaways first, then the 3 surprises, then the 3 things to track.
* Keep each bullet concise — prefer single-sentence bullets.
* Do not include warnings, meta-comments, or process notes in the final output.
* Do not repeat ideas, insights, quotes, habits, facts, or references across bullets.
* When interpreting tone or identifying a hidden signal, be explicit about the textual clue supporting that interpretation (briefly, within the same bullet).
* If any numeric figure or explicit guidance is cited in the transcript, reproduce it verbatim in the relevant bullet and mark it as **(quoted)**.
* If information is missing or management declined to answer, state that clearly within the relevant bullet.
* Ensure fidelity: do not invent facts not in the transcript. If you infer, label it as an inference.
* Ensure you follow ALL these instructions when creating your output.
# INPUT
INPUT:

View File

@@ -46,188 +46,189 @@
42. **clean_text**: Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
43. **coding_master**: Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
44. **compare_and_contrast**: Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
45. **convert_to_markdown**: Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
46. **create_5_sentence_summary**: Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
47. **create_academic_paper**: Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
48. **create_ai_jobs_analysis**: Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
49. **create_aphorisms**: Find and generate a list of brief, witty statements.
50. **create_art_prompt**: Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
51. **create_better_frame**: Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
52. **create_coding_feature**: Generates secure and composable code features using modern technology and best practices from project specifications.
53. **create_coding_project**: Generate wireframes and starter code for any coding ideas that you have.
54. **create_command**: Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
55. **create_conceptmap**: Transforms unstructured text or markdown content into an interactive HTML concept map using Vis.js by extracting key concepts and their logical relationships.
56. **create_cyber_summary**: Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
57. **create_design_document**: Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
58. **create_diy**: Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
59. **create_excalidraw_visualization**: Creates complex Excalidraw diagrams to visualize relationships between concepts and ideas in structured format.
60. **create_flash_cards**: Creates flashcards for key concepts, definitions, and terms with question-answer format for educational purposes.
61. **create_formal_email**: Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
62. **create_git_diff_commit**: Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
63. **create_graph_from_input**: Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
64. **create_hormozi_offer**: Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
65. **create_idea_compass**: Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
66. **create_investigation_visualization**: Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
67. **create_keynote**: Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
68. **create_loe_document**: Creates detailed Level of Effort documents for estimating work effort, resources, and costs for tasks or projects.
69. **create_logo**: Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
70. **create_markmap_visualization**: Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
71. **create_mermaid_visualization**: Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
72. **create_mermaid_visualization_for_github**: Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
73. **create_micro_summary**: Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
74. **create_mnemonic_phrases**: Creates memorable mnemonic sentences from given words to aid in memory retention and learning.
75. **create_network_threat_landscape**: Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
76. **create_newsletter_entry**: Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
77. **create_npc**: Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
78. **create_pattern**: Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
79. **create_prd**: Creates a precise Product Requirements Document (PRD) in Markdown based on input.
80. **create_prediction_block**: Extracts and formats predictions from input into a structured Markdown block for a blog post.
81. **create_quiz**: Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
82. **create_reading_plan**: Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
83. **create_recursive_outline**: Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
84. **create_report_finding**: Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
85. **create_rpg_summary**: Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
86. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
87. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
88. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
89. **create_story_about_people_interaction**: Analyze two personas, compare their dynamics, and craft a realistic, character-driven story from those insights.
90. **create_story_about_person**: Creates compelling, realistic short stories based on psychological profiles, showing how characters navigate everyday problems using strategies consistent with their personality traits.
91. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
92. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
93. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
94. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
95. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
96. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
97. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
98. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
99. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
100. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
101. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
102. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
103. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
104. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
105. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
106. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
107. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
108. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
109. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
110. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
111. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
112. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
113. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
114. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
115. **extract_characters**: Identify all characters (human and non-human), resolve their aliases and pronouns into canonical names, and produce detailed descriptions of each character's role, motivations, and interactions ranked by narrative importance.
116. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
117. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
118. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
119. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
120. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
121. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
122. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the IDEAS and INSIGHTS sections.
123. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
124. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
125. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
126. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
127. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
128. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
129. **extract_mcp_servers**: Identify and summarize Model Context Protocol (MCP) servers referenced in the input along with their key details.
130. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
131. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
132. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
133. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
134. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
135. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
136. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
137. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
138. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
139. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
140. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
141. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
142. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
143. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
144. **extract_videoid**: Extracts and outputs the video ID from any given URL.
145. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
146. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
147. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
148. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
149. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
150. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
151. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
152. **fix_typos**: Proofreads and corrects typos, spelling, grammar, and punctuation errors in text.
153. **generate_code_rules**: Compile best-practice coding rules and guardrails for AI-assisted development workflows from the provided content.
154. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
155. **heal_person**: Develops a comprehensive plan for spiritual and mental healing based on psychological profiles, providing personalized recommendations for mental health improvement and overall life enhancement.
156. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
157. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
158. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
159. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
160. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
161. **identify_job_stories**: Identifies key job stories or requirements for roles.
162. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
163. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
164. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
165. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
166. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
167. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
168. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
169. **model_as_sherlock_freud**: Builds psychological models using detective reasoning and psychoanalytic insight to understand human behavior.
170. **official_pattern_template**: Template to use if you want to create new fabric patterns.
171. **predict_person_actions**: Predicts behavioral responses based on psychological profiles and challenges.
172. **prepare_7s_strategy**: Prepares a comprehensive 7S strategy briefing document capturing the organizational profile, strategic elements, and market dynamics in clear, concise, and organized content.
173. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
174. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
175. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
176. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
177. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
178. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
179. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
180. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
181. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
182. **recommend_yoga_practice**: Provides personalized yoga sequences, meditation guidance, and holistic lifestyle advice based on individual profiles.
183. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
184. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
185. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
186. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
187. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
188. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
189. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
190. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
191. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
192. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
193. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
194. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
195. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
196. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
197. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
198. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
199. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
200. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
201. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
202. **t_check_dunning_kruger**: Assess narratives for Dunning-Kruger patterns by contrasting self-perception with demonstrated competence and confidence cues.
203. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
204. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
205. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
206. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
207. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
208. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
209. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
210. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
211. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
212. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
213. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
214. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
215. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
216. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
217. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
218. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
219. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
220. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
221. **write_essay**: Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
222. **write_essay_pg**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
223. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
224. **write_latex**: Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
225. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
226. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
227. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
228. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
229. **youtube_summary**: Creates concise, timestamped YouTube video summaries that highlight key points.
45. **concall_summary**: Analyzes earnings and conference call transcripts to extract management commentary, analyst Q&A, financial insights, risks, and executive summaries.
46. **convert_to_markdown**: Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
47. **create_5_sentence_summary**: Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
48. **create_academic_paper**: Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
49. **create_ai_jobs_analysis**: Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
50. **create_aphorisms**: Find and generate a list of brief, witty statements.
51. **create_art_prompt**: Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
52. **create_better_frame**: Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
53. **create_coding_feature**: Generates secure and composable code features using modern technology and best practices from project specifications.
54. **create_coding_project**: Generate wireframes and starter code for any coding ideas that you have.
55. **create_command**: Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
56. **create_conceptmap**: Transforms unstructured text or markdown content into an interactive HTML concept map using Vis.js by extracting key concepts and their logical relationships.
57. **create_cyber_summary**: Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
58. **create_design_document**: Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
59. **create_diy**: Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
60. **create_excalidraw_visualization**: Creates complex Excalidraw diagrams to visualize relationships between concepts and ideas in structured format.
61. **create_flash_cards**: Creates flashcards for key concepts, definitions, and terms with question-answer format for educational purposes.
62. **create_formal_email**: Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
63. **create_git_diff_commit**: Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
64. **create_graph_from_input**: Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
65. **create_hormozi_offer**: Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
66. **create_idea_compass**: Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
67. **create_investigation_visualization**: Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
68. **create_keynote**: Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
69. **create_loe_document**: Creates detailed Level of Effort documents for estimating work effort, resources, and costs for tasks or projects.
70. **create_logo**: Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
71. **create_markmap_visualization**: Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
72. **create_mermaid_visualization**: Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
73. **create_mermaid_visualization_for_github**: Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
74. **create_micro_summary**: Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
75. **create_mnemonic_phrases**: Creates memorable mnemonic sentences from given words to aid in memory retention and learning.
76. **create_network_threat_landscape**: Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
77. **create_newsletter_entry**: Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
78. **create_npc**: Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
79. **create_pattern**: Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
80. **create_prd**: Creates a precise Product Requirements Document (PRD) in Markdown based on input.
81. **create_prediction_block**: Extracts and formats predictions from input into a structured Markdown block for a blog post.
82. **create_quiz**: Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear Markdown format.
83. **create_reading_plan**: Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
84. **create_recursive_outline**: Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
85. **create_report_finding**: Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
86. **create_rpg_summary**: Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
87. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
88. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
89. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
90. **create_story_about_people_interaction**: Analyze two personas, compare their dynamics, and craft a realistic, character-driven story from those insights.
91. **create_story_about_person**: Creates compelling, realistic short stories based on psychological profiles, showing how characters navigate everyday problems using strategies consistent with their personality traits.
92. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
93. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
94. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
95. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
96. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
97. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
98. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
99. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
100. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
101. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
102. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
103. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
104. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
105. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
106. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
107. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
108. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
109. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
110. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
111. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
112. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
113. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
114. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
115. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
116. **extract_characters**: Identify all characters (human and non-human), resolve their aliases and pronouns into canonical names, and produce detailed descriptions of each character's role, motivations, and interactions ranked by narrative importance.
117. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
118. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
119. **extract_ctf_writeup**: Extracts a short write-up from a war-story-style text about a cybersecurity engagement.
120. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
121. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
122. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
123. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in the IDEAS and INSIGHTS sections.
124. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
125. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
126. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
127. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
128. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
129. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
130. **extract_mcp_servers**: Identify and summarize Model Context Protocol (MCP) servers referenced in the input along with their key details.
131. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
132. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
133. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
134. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
135. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
136. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
137. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
138. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
139. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
140. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
141. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
142. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
143. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
144. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
145. **extract_videoid**: Extracts and outputs the video ID from any given URL.
146. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
147. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
148. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
149. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
150. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
151. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
152. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
153. **fix_typos**: Proofreads and corrects typos, spelling, grammar, and punctuation errors in text.
154. **generate_code_rules**: Compile best-practice coding rules and guardrails for AI-assisted development workflows from the provided content.
155. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
156. **heal_person**: Develops a comprehensive plan for spiritual and mental healing based on psychological profiles, providing personalized recommendations for mental health improvement and overall life enhancement.
157. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
158. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
159. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
160. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
161. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
162. **identify_job_stories**: Identifies key job stories or requirements for roles.
163. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
164. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
165. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
166. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
167. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
168. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
169. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
170. **model_as_sherlock_freud**: Builds psychological models using detective reasoning and psychoanalytic insight to understand human behavior.
171. **official_pattern_template**: Template to use if you want to create new fabric patterns.
172. **predict_person_actions**: Predicts behavioral responses based on psychological profiles and challenges.
173. **prepare_7s_strategy**: Prepares a comprehensive 7S strategy briefing document capturing the organizational profile, strategic elements, and market dynamics in clear, concise, and organized content.
174. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
175. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
176. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
177. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
178. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
179. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
180. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
181. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
182. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
183. **recommend_yoga_practice**: Provides personalized yoga sequences, meditation guidance, and holistic lifestyle advice based on individual profiles.
184. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
185. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
186. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
187. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
188. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
189. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
190. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
191. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
192. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
193. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
194. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
195. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
196. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
197. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
198. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
199. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
200. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
201. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
202. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
203. **t_check_dunning_kruger**: Assess narratives for Dunning-Kruger patterns by contrasting self-perception with demonstrated competence and confidence cues.
204. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
205. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
206. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
207. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
208. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
209. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
210. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
211. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
212. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
213. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
214. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
215. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
216. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
217. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
218. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
219. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
220. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
221. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
222. **write_essay**: Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
223. **write_essay_pg**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
224. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
225. **write_latex**: Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
226. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
227. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
228. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
229. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
230. **youtube_summary**: Creates concise, timestamped YouTube video summaries that highlight key points.
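
Each entry above is a reusable prompt that is applied from the command line. The sketch below shows typical usage; the flags are the commonly documented fabric CLI options and `VIDEO_ID` is a placeholder, so check `fabric --help` for the exact options in your build.

```bash
# Apply a pattern to piped text (pattern names match the list above)
cat transcript.txt | fabric --pattern extract_wisdom

# Fetch a YouTube transcript and summarize it in one step
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" --pattern youtube_summary
```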


@@ -73,11 +73,11 @@ Match the request to one or more of these primary categories:
**AI**: ai, create_ai_jobs_analysis, create_art_prompt, create_pattern, create_prediction_block, extract_mcp_servers, extract_wisdom_agents, generate_code_rules, improve_prompt, judge_output, rate_ai_response, rate_ai_result, raw_query, suggest_pattern, summarize_prompt
**ANALYSIS**: ai, analyze_answers, analyze_bill, analyze_bill_short, analyze_candidates, analyze_cfp_submission, analyze_claims, analyze_comments, analyze_debate, analyze_email_headers, analyze_incident, analyze_interviewer_techniques, analyze_logs, analyze_malware, analyze_military_strategy, analyze_mistakes, analyze_paper, analyze_paper_simple, analyze_patent, analyze_personality, analyze_presentation, analyze_product_feedback, analyze_proposition, analyze_prose, analyze_prose_json, analyze_prose_pinker, analyze_risk, analyze_sales_call, analyze_spiritual_text, analyze_tech_impact, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, apply_ul_tags, check_agreement, compare_and_contrast, create_ai_jobs_analysis, create_idea_compass, create_investigation_visualization, create_prediction_block, create_recursive_outline, create_story_about_people_interaction, create_tags, dialog_with_socrates, extract_main_idea, extract_predictions, find_hidden_message, find_logical_fallacies, get_wow_per_minute, identify_dsrp_distinctions, identify_dsrp_perspectives, identify_dsrp_relationships, identify_dsrp_systems, identify_job_stories, label_and_rate, model_as_sherlock_freud, predict_person_actions, prepare_7s_strategy, provide_guidance, rate_content, rate_value, recommend_artists, recommend_talkpanel_topics, review_design, summarize_board_meeting, t_analyze_challenge_handling, t_check_dunning_kruger, t_check_metrics, t_describe_life_outlook, t_extract_intro_sentences, t_extract_panel_topics, t_find_blindspots, t_find_negative_thinking, t_red_team_thinking, t_threat_model_plans, t_year_in_review, write_hackerone_report
**ANALYSIS**: ai, analyze_answers, analyze_bill, analyze_bill_short, analyze_candidates, analyze_cfp_submission, analyze_claims, analyze_comments, analyze_debate, analyze_email_headers, analyze_incident, analyze_interviewer_techniques, analyze_logs, analyze_malware, analyze_military_strategy, analyze_mistakes, analyze_paper, analyze_paper_simple, analyze_patent, analyze_personality, analyze_presentation, analyze_product_feedback, analyze_proposition, analyze_prose, analyze_prose_json, analyze_prose_pinker, analyze_risk, analyze_sales_call, analyze_spiritual_text, analyze_tech_impact, analyze_terraform_plan, analyze_threat_report, analyze_threat_report_cmds, analyze_threat_report_trends, apply_ul_tags, check_agreement, compare_and_contrast, concall_summary, create_ai_jobs_analysis, create_idea_compass, create_investigation_visualization, create_prediction_block, create_recursive_outline, create_story_about_people_interaction, create_tags, dialog_with_socrates, extract_main_idea, extract_predictions, find_hidden_message, find_logical_fallacies, get_wow_per_minute, identify_dsrp_distinctions, identify_dsrp_perspectives, identify_dsrp_relationships, identify_dsrp_systems, identify_job_stories, label_and_rate, model_as_sherlock_freud, predict_person_actions, prepare_7s_strategy, provide_guidance, rate_content, rate_value, recommend_artists, recommend_talkpanel_topics, review_design, summarize_board_meeting, t_analyze_challenge_handling, t_check_dunning_kruger, t_check_metrics, t_describe_life_outlook, t_extract_intro_sentences, t_extract_panel_topics, t_find_blindspots, t_find_negative_thinking, t_red_team_thinking, t_threat_model_plans, t_year_in_review, write_hackerone_report
**BILL**: analyze_bill, analyze_bill_short
**BUSINESS**: check_agreement, create_ai_jobs_analysis, create_formal_email, create_hormozi_offer, create_loe_document, create_logo, create_newsletter_entry, create_prd, explain_project, extract_business_ideas, extract_characters, extract_product_features, extract_skills, extract_sponsors, identify_job_stories, prepare_7s_strategy, rate_value, t_check_metrics, t_create_h3_career, t_visualize_mission_goals_projects, t_year_in_review, transcribe_minutes
**BUSINESS**: check_agreement, concall_summary, create_ai_jobs_analysis, create_formal_email, create_hormozi_offer, create_loe_document, create_logo, create_newsletter_entry, create_prd, explain_project, extract_business_ideas, extract_characters, extract_product_features, extract_skills, extract_sponsors, identify_job_stories, prepare_7s_strategy, rate_value, t_check_metrics, t_create_h3_career, t_visualize_mission_goals_projects, t_year_in_review, transcribe_minutes
**CLASSIFICATION**: apply_ul_tags
@@ -109,7 +109,7 @@ Match the request to one or more of these primary categories:
**STRATEGY**: analyze_military_strategy, create_better_frame, prepare_7s_strategy, t_analyze_challenge_handling, t_find_blindspots, t_find_negative_thinking, t_find_neglected_goals, t_red_team_thinking, t_threat_model_plans, t_visualize_mission_goals_projects
**SUMMARIZE**: capture_thinkers_work, create_5_sentence_summary, create_micro_summary, create_newsletter_entry, create_show_intro, create_summary, extract_core_message, extract_latest_video, extract_main_idea, summarize, summarize_board_meeting, summarize_debate, summarize_git_changes, summarize_git_diff, summarize_lecture, summarize_legislation, summarize_meeting, summarize_micro, summarize_newsletter, summarize_paper, summarize_pull-requests, summarize_rpg_session, youtube_summary
**SUMMARIZE**: capture_thinkers_work, concall_summary, create_5_sentence_summary, create_micro_summary, create_newsletter_entry, create_show_intro, create_summary, extract_core_message, extract_latest_video, extract_main_idea, summarize, summarize_board_meeting, summarize_debate, summarize_git_changes, summarize_git_diff, summarize_lecture, summarize_legislation, summarize_meeting, summarize_micro, summarize_newsletter, summarize_paper, summarize_pull-requests, summarize_rpg_session, youtube_summary
**VISUALIZE**: create_conceptmap, create_excalidraw_visualization, create_graph_from_input, create_idea_compass, create_investigation_visualization, create_keynote, create_logo, create_markmap_visualization, create_mermaid_visualization, create_mermaid_visualization_for_github, create_video_chapters, create_visualization, enrich_blog_post, t_visualize_mission_goals_projects


@@ -196,6 +196,10 @@ Review contract to identify stipulations, issues, and changes for negotiation.
Create comparisons table, highlighting key differences and similarities.
### concall_summary
Analyze earnings call transcripts to extract management insights, financial metrics, and investment implications.
### create_ai_jobs_analysis
Identify automation risks and career resilience strategies.


@@ -51,6 +51,29 @@ docs: update installation instructions
## Pull Request Process
### Pull Request Guidelines
**Keep pull requests focused and minimal.**
PRs that touch a large number of files (50+) without clear functional justification will likely be rejected without detailed review.
#### Why we enforce this
- **Reviewability**: Large PRs are effectively un-reviewable. Studies show reviewer effectiveness drops significantly after ~200-400 lines of code. A 93-file "cleanup" PR cannot receive meaningful review.
- **Git history**: Sweeping changes pollute `git blame`, making it harder to trace when and why functional changes were made.
- **Merge conflicts**: Large PRs increase the likelihood of conflicts with other contributors' work.
- **Risk**: More changed lines means more opportunities for subtle bugs, even in "safe" refactors.
#### What to do instead
If you have a large change in mind, break it into logical, independently-mergeable slices. For example:
- ✅ "Replace `interface{}` with `any` across codebase" (single mechanical change, easy to verify)
- ✅ "Migrate to `strings.CutPrefix` in `internal/cli`" (scoped to one package)
- ❌ "Modernize codebase with multiple idiom updates" (too broad, impossible to review)
For sweeping refactors or style changes, **open an issue first** to discuss the approach with maintainers before investing time in the work.
### Changelog Generation (REQUIRED)
After opening your PR, generate a changelog entry:
@@ -142,6 +165,79 @@ Example output here
- Include usage examples
- Keep documentation current
### REST API Documentation
When adding or modifying REST API endpoints, you must update the Swagger documentation:
**1. Add Swagger annotations to your handler:**
```go
// HandlerName godoc
// @Summary Short description of what this endpoint does
// @Description Detailed description of the endpoint's functionality
// @Tags category-name
// @Accept json
// @Produce json
// @Param name path string true "Parameter description"
// @Param body body RequestType true "Request body description"
// @Success 200 {object} ResponseType "Success description"
// @Failure 400 {object} map[string]string "Bad request"
// @Failure 500 {object} map[string]string "Server error"
// @Security ApiKeyAuth
// @Router /endpoint/path [get]
func (h *Handler) HandlerName(c *gin.Context) {
    // Implementation
}
```
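If your handler references new request or response types, swag derives their schemas from the Go struct definitions, and `example` struct tags supply the sample values shown in Swagger UI. A minimal sketch follows; the type and field names here are hypothetical, not the project's actual types:
```go
// ExampleRequest is the body type referenced by the @Param annotation above.
type ExampleRequest struct {
    // URL of the resource to process.
    URL string `json:"url" example:"https://example.com/resource"`
    // Language is an optional two-letter language code.
    Language string `json:"language,omitempty" example:"en"`
}

// ExampleResponse is the type referenced by the @Success annotation above.
type ExampleResponse struct {
    Result string `json:"result" example:"processed output"`
}
```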
**2. Regenerate Swagger documentation:**
```bash
# Install swag CLI if you haven't already
go install github.com/swaggo/swag/cmd/swag@latest
# Generate updated documentation
swag init -g internal/server/serve.go -o docs
```
**3. Commit the generated files:**
The following files will be updated and should be committed:
- `docs/swagger.json`
- `docs/swagger.yaml`
- `docs/docs.go`
**4. Test your changes:**
Start the server and verify your endpoint appears in Swagger UI:
```bash
go run ./cmd/fabric --serve
# Open http://localhost:8080/swagger/index.html
```
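As an extra check, you can fetch the regenerated spec directly; swaggo conventionally serves the raw document at `/swagger/doc.json` next to the UI (the port assumed here matches the server invocation above):
```bash
curl -s http://localhost:8080/swagger/doc.json | head -n 20
```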
**Examples to follow:**
- Chat endpoint: `internal/server/chat.go:58-68`
- Patterns endpoint: `internal/server/patterns.go:36-45`
- Models endpoint: `internal/server/models.go:20-28`
**Common annotation tags:**
- `@Summary` - One-line description (required)
- `@Description` - Detailed explanation
- `@Tags` - Logical grouping (e.g., "patterns", "chat", "models")
- `@Accept` - Input content type (e.g., "json")
- `@Produce` - Output content type (e.g., "json", "text/event-stream")
- `@Param` - Request parameters (path, query, body)
- `@Success` - Successful response (include status code and type)
- `@Failure` - Error responses
- `@Security` - Authentication requirement (use "ApiKeyAuth" for API key)
- `@Router` - Endpoint path and HTTP method
For complete Swagger annotation syntax, see the [swaggo documentation](https://github.com/swaggo/swag#declarative-comments-format).
## Getting Help
- Check existing issues first

536
docs/docs.go Normal file
View File

@@ -0,0 +1,536 @@
// Package docs Code generated by swaggo/swag. DO NOT EDIT
package docs
import "github.com/swaggo/swag"
const docTemplate = `{
"schemes": {{ marshal .Schemes }},
"swagger": "2.0",
"info": {
"description": "{{escape .Description}}",
"title": "{{.Title}}",
"contact": {
"name": "Fabric Support",
"url": "https://github.com/danielmiessler/fabric"
},
"license": {
"name": "MIT",
"url": "https://opensource.org/licenses/MIT"
},
"version": "{{.Version}}"
},
"host": "{{.Host}}",
"basePath": "{{.BasePath}}",
"paths": {
"/chat": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Stream AI responses using Server-Sent Events (SSE)",
"consumes": [
"application/json"
],
"produces": [
"text/event-stream"
],
"tags": [
"chat"
],
"summary": "Stream chat completions",
"parameters": [
{
"description": "Chat request with prompts and options",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.ChatRequest"
}
}
],
"responses": {
"200": {
"description": "Streaming response",
"schema": {
"$ref": "#/definitions/restapi.StreamResponse"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/models/names": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Get a list of all available AI models grouped by vendor",
"produces": [
"application/json"
],
"tags": [
"models"
],
"summary": "List all available models",
"responses": {
"200": {
"description": "Returns models (array) and vendors (map)",
"schema": {
"type": "object",
"additionalProperties": true
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/patterns/{name}": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Retrieve a pattern by name",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"patterns"
],
"summary": "Get a pattern",
"parameters": [
{
"type": "string",
"description": "Pattern name",
"name": "name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/fsdb.Pattern"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/patterns/{name}/apply": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Apply a pattern with variable substitution",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"patterns"
],
"summary": "Apply pattern with variables",
"parameters": [
{
"type": "string",
"description": "Pattern name",
"name": "name",
"in": "path",
"required": true
},
{
"description": "Pattern application request",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.PatternApplyRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/fsdb.Pattern"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/youtube/transcript": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Retrieves the transcript of a YouTube video along with video metadata (title and description)",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"youtube"
],
"summary": "Get YouTube video transcript",
"parameters": [
{
"description": "YouTube transcript request with URL, language, and timestamp options",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.YouTubeRequest"
}
}
],
"responses": {
"200": {
"description": "Successful response with transcript and metadata",
"schema": {
"$ref": "#/definitions/restapi.YouTubeResponse"
}
},
"400": {
"description": "Bad request - invalid URL or playlist URL provided",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"500": {
"description": "Internal server error - failed to retrieve transcript or metadata",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
}
},
"definitions": {
"domain.ThinkingLevel": {
"type": "string",
"enum": [
"off",
"low",
"medium",
"high"
],
"x-enum-varnames": [
"ThinkingOff",
"ThinkingLow",
"ThinkingMedium",
"ThinkingHigh"
]
},
"fsdb.Pattern": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"name": {
"type": "string"
},
"pattern": {
"type": "string"
}
}
},
"restapi.ChatRequest": {
"type": "object",
"properties": {
"audioFormat": {
"type": "string"
},
"audioOutput": {
"type": "boolean"
},
"frequencyPenalty": {
"type": "number",
"format": "float64"
},
"imageBackground": {
"type": "string"
},
"imageCompression": {
"type": "integer"
},
"imageFile": {
"type": "string"
},
"imageQuality": {
"type": "string"
},
"imageSize": {
"type": "string"
},
"language": {
"description": "Add Language field to bind from request",
"type": "string"
},
"maxTokens": {
"type": "integer"
},
"model": {
"type": "string"
},
"modelContextLength": {
"type": "integer"
},
"notification": {
"type": "boolean"
},
"notificationCommand": {
"type": "string"
},
"presencePenalty": {
"type": "number",
"format": "float64"
},
"prompts": {
"type": "array",
"items": {
"$ref": "#/definitions/restapi.PromptRequest"
}
},
"raw": {
"type": "boolean"
},
"search": {
"type": "boolean"
},
"searchLocation": {
"type": "string"
},
"seed": {
"type": "integer"
},
"suppressThink": {
"type": "boolean"
},
"temperature": {
"type": "number",
"format": "float64"
},
"thinkEndTag": {
"type": "string"
},
"thinkStartTag": {
"type": "string"
},
"thinking": {
"$ref": "#/definitions/domain.ThinkingLevel"
},
"topP": {
"type": "number",
"format": "float64"
},
"voice": {
"type": "string"
}
}
},
"restapi.PatternApplyRequest": {
"type": "object",
"properties": {
"input": {
"type": "string"
},
"variables": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"restapi.PromptRequest": {
"type": "object",
"properties": {
"contextName": {
"type": "string"
},
"model": {
"type": "string"
},
"patternName": {
"type": "string"
},
"strategyName": {
"description": "Optional strategy name",
"type": "string"
},
"userInput": {
"type": "string"
},
"variables": {
"description": "Pattern variables",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"vendor": {
"type": "string"
}
}
},
"restapi.StreamResponse": {
"type": "object",
"properties": {
"content": {
"description": "The actual content",
"type": "string"
},
"format": {
"description": "\"markdown\", \"mermaid\", \"plain\"",
"type": "string"
},
"type": {
"description": "\"content\", \"error\", \"complete\"",
"type": "string"
}
}
},
"restapi.YouTubeRequest": {
"type": "object",
"required": [
"url"
],
"properties": {
"language": {
"description": "Language code for transcript (default: \"en\")",
"type": "string",
"example": "en"
},
"timestamps": {
"description": "Include timestamps in the transcript (default: false)",
"type": "boolean",
"example": false
},
"url": {
"description": "YouTube video URL (required)",
"type": "string",
"example": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
}
}
},
"restapi.YouTubeResponse": {
"type": "object",
"properties": {
"description": {
"description": "Video description from YouTube metadata",
"type": "string",
"example": "This is the video description from YouTube..."
},
"title": {
"description": "Video title from YouTube metadata",
"type": "string",
"example": "Example Video Title"
},
"transcript": {
"description": "The video transcript text",
"type": "string",
"example": "This is the video transcript..."
},
"videoId": {
"description": "YouTube video ID",
"type": "string",
"example": "dQw4w9WgXcQ"
}
}
}
},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "X-API-Key",
"in": "header"
}
}
}`
// SwaggerInfo holds exported Swagger Info so clients can modify it
var SwaggerInfo = &swag.Spec{
Version: "1.0",
Host: "localhost:8080",
BasePath: "/",
Schemes: []string{},
Title: "Fabric REST API",
Description: "REST API for Fabric AI augmentation framework. Provides endpoints for chat completions, pattern management, contexts, sessions, and more.",
InfoInstanceName: "swagger",
SwaggerTemplate: docTemplate,
LeftDelim: "{{",
RightDelim: "}}",
}
func init() {
swag.Register(SwaggerInfo.InstanceName(), SwaggerInfo)
}

491
docs/rest-api.md Normal file
View File

@@ -0,0 +1,491 @@
# Fabric REST API
Fabric's REST API provides HTTP access to all core functionality: chat completions, pattern management, contexts, sessions, and more.
## Quick Start
Start the server:
```bash
fabric --serve
```
The server runs on `http://localhost:8080` by default.
Test it:
```bash
curl http://localhost:8080/patterns/names
```
## Interactive API Documentation
Fabric includes Swagger/OpenAPI documentation with an interactive UI:
- **Swagger UI**: [http://localhost:8080/swagger/index.html](http://localhost:8080/swagger/index.html)
- **OpenAPI JSON**: [http://localhost:8080/swagger/doc.json](http://localhost:8080/swagger/doc.json)
- **OpenAPI YAML**: [http://localhost:8080/swagger/swagger.yaml](http://localhost:8080/swagger/swagger.yaml)
The Swagger UI lets you:
- Browse all available endpoints
- View request/response schemas
- Test API calls directly in your browser
- See authentication requirements
**Note:** Swagger documentation endpoints are publicly accessible even when API key authentication is enabled. Only the actual API endpoints require authentication.
## Server Options
| Flag | Description | Default |
| ------ | ------------- | --------- |
| `--serve` | Start the REST API server | - |
| `--address` | Server address and port | `:8080` |
| `--api-key` | Enable API key authentication | (none) |
Example with custom configuration:
```bash
fabric --serve --address :9090 --api-key my_secret_key
```
## Authentication
When you set an API key with `--api-key`, all requests must include:
```http
X-API-Key: your-api-key-here
```
Example:
```bash
curl -H "X-API-Key: my_secret_key" http://localhost:8080/patterns/names
```
Without an API key, the server accepts all requests and logs a warning.
## Endpoints
### Chat Completions
Stream AI responses using Server-Sent Events (SSE).
**Endpoint:** `POST /chat`
**Request:**
```json
{
"prompts": [
{
"userInput": "Explain quantum computing",
"vendor": "openai",
"model": "gpt-4o",
"patternName": "explain",
"contextName": "",
"strategyName": "",
"variables": {}
}
],
"language": "en",
"temperature": 0.7,
"topP": 0.9,
"frequencyPenalty": 0,
"presencePenalty": 0,
"thinking": 0
}
```
**Prompt Fields:**
| Field | Required | Default | Description |
| ------- | ---------- | --------- | ------------- |
| `userInput` | **Yes** | - | Your message or question |
| `vendor` | **Yes** | - | AI provider: `openai`, `anthropic`, `gemini`, `ollama`, etc. |
| `model` | **Yes** | - | Model name: `gpt-4o`, `claude-sonnet-4.5`, `gemini-2.0-flash-exp`, etc. |
| `patternName` | No | `""` | Pattern to apply (from `~/.config/fabric/patterns/`) |
| `contextName` | No | `""` | Context to prepend (from `~/.config/fabric/contexts/`) |
| `strategyName` | No | `""` | Strategy to use (from `~/.config/fabric/strategies/`) |
| `variables` | No | `{}` | Variable substitutions for patterns (e.g., `{"role": "expert"}`) |
**Chat Options:**
| Field | Required | Default | Description |
| ------- | ---------- | --------- | ------------- |
| `language` | No | `"en"` | Language code for responses |
| `temperature` | No | `0.7` | Randomness (0.0-1.0) |
| `topP` | No | `0.9` | Nucleus sampling (0.0-1.0) |
| `frequencyPenalty` | No | `0.0` | Reduce repetition (-2.0 to 2.0) |
| `presencePenalty` | No | `0.0` | Encourage new topics (-2.0 to 2.0) |
| `thinking` | No | `0` | Reasoning level (0=off, or numeric for tokens) |
**Response:**
Server-Sent Events stream with `Content-Type: text/event-stream`. Each line contains JSON:
```json
{"type": "content", "format": "markdown", "content": "Quantum computing uses..."}
{"type": "content", "format": "markdown", "content": " quantum mechanics..."}
{"type": "complete", "format": "markdown", "content": ""}
```
**Types:**
- `content` - Response chunk
- `error` - Error message
- `complete` - Stream finished
**Formats:**
- `markdown` - Standard text
- `mermaid` - Mermaid diagram
- `plain` - Plain text
**Example:**
```bash
curl -X POST http://localhost:8080/chat \
-H "Content-Type: application/json" \
-d '{
"prompts": [{
"userInput": "What is Fabric?",
"vendor": "openai",
"model": "gpt-4o",
"patternName": "explain"
}]
}'
```
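To reassemble the streamed chunks into plain text from the shell, a minimal sketch (this assumes each stream line is a bare JSON object exactly as shown above, and that `jq` is installed):
```bash
# Stream the chat response and print only the content chunks, concatenated
curl -sN -X POST http://localhost:8080/chat \
  -H "Content-Type: application/json" \
  -d '{"prompts":[{"userInput":"What is Fabric?","vendor":"openai","model":"gpt-4o","patternName":"explain"}]}' |
  jq -rj 'select(.type == "content") | .content'
```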
### Patterns
Manage reusable AI prompts.
| Method | Endpoint | Description |
| -------- | ---------- | ------------- |
| `GET` | `/patterns/names` | List all pattern names |
| `GET` | `/patterns/:name` | Get pattern content |
| `GET` | `/patterns/exists/:name` | Check if pattern exists |
| `POST` | `/patterns/:name` | Create or update pattern |
| `DELETE` | `/patterns/:name` | Delete pattern |
| `PUT` | `/patterns/rename/:oldName/:newName` | Rename pattern |
| `POST` | `/patterns/:name/apply` | Apply pattern with variables |
**Example - Get pattern:**
```bash
curl http://localhost:8080/patterns/summarize
```
**Example - Apply pattern with variables:**
```bash
curl -X POST http://localhost:8080/patterns/translate/apply \
-H "Content-Type: application/json" \
-d '{
"input": "Hello world",
"variables": {"lang_code": "es"}
}'
```
**Example - Create pattern:**
```bash
curl -X POST http://localhost:8080/patterns/my_custom_pattern \
-H "Content-Type: text/plain" \
-d "You are an expert in explaining complex topics simply..."
```
### Contexts
Manage context snippets that prepend to prompts.
| Method | Endpoint | Description |
| -------- | ---------- | ------------- |
| `GET` | `/contexts/names` | List all context names |
| `GET` | `/contexts/:name` | Get context content |
| `GET` | `/contexts/exists/:name` | Check if context exists |
| `POST` | `/contexts/:name` | Create or update context |
| `DELETE` | `/contexts/:name` | Delete context |
| `PUT` | `/contexts/rename/:oldName/:newName` | Rename context |
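For example, assuming contexts are created with a plain-text body the same way patterns are (shown above), and using an illustrative context name:
```bash
# Create (or update) a context, then read it back
curl -X POST http://localhost:8080/contexts/my_project \
  -H "Content-Type: text/plain" \
  -d "This conversation is about the Fabric REST API..."
curl http://localhost:8080/contexts/my_project
```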
### Sessions
Manage chat conversation history.
| Method | Endpoint | Description |
| -------- | ---------- | ------------- |
| `GET` | `/sessions/names` | List all session names |
| `GET` | `/sessions/:name` | Get session messages (JSON array) |
| `GET` | `/sessions/exists/:name` | Check if session exists |
| `POST` | `/sessions/:name` | Save session messages |
| `DELETE` | `/sessions/:name` | Delete session |
| `PUT` | `/sessions/rename/:oldName/:newName` | Rename session |
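For example, to list saved sessions and fetch one by name (the session name is illustrative):
```bash
# List session names, then retrieve one session as a JSON array of messages
curl http://localhost:8080/sessions/names
curl http://localhost:8080/sessions/my_chat
```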
### Models
List available AI models.
**Endpoint:** `GET /models/names`
**Response:**
```json
{
"models": ["gpt-4o", "gpt-4o-mini", "claude-sonnet-4.5", "gemini-2.0-flash-exp"],
"vendors": {
"openai": ["gpt-4o", "gpt-4o-mini"],
"anthropic": ["claude-sonnet-4.5", "claude-opus-4.5"],
"gemini": ["gemini-2.0-flash-exp", "gemini-2.0-flash-thinking-exp"]
}
}
```
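For example, to fetch the list and pull out just the vendor names (assuming `jq` is available):
```bash
curl -s -H "X-API-Key: my_secret_key" http://localhost:8080/models/names | jq '.vendors | keys'
```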
### Strategies
List available prompt strategies (Chain of Thought, etc.).
**Endpoint:** `GET /strategies`
**Response:**
```json
[
{
"name": "chain_of_thought",
"description": "Think step by step",
"prompt": "Let's think through this step by step..."
}
]
```
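For example, to print only the strategy names (assuming the array response shown above and `jq`):
```bash
curl -s http://localhost:8080/strategies | jq -r '.[].name'
```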
### YouTube Transcripts
Extract transcripts from YouTube videos.
**Endpoint:** `POST /youtube/transcript`
**Request:**
```json
{
"url": "https://youtube.com/watch?v=dQw4w9WgXcQ",
"timestamps": false
}
```
**Response:**
```json
{
"videoId": "Video ID",
"title": "Video Title",
"description" : "Video description...",
"transcript": "Full transcript text..."
}
```
**Example:**
```bash
curl -X POST http://localhost:8080/youtube/transcript \
-H "Content-Type: application/json" \
-d '{"url": "https://youtube.com/watch?v=dQw4w9WgXcQ", "timestamps": true}'
```
### Configuration
Manage API keys and environment settings.
**Get configuration:**
`GET /config`
Returns API keys and URLs for all configured vendors.
**Update configuration:**
`POST /config/update`
```json
{
"OPENAI_API_KEY": "sk-...",
"ANTHROPIC_API_KEY": "sk-ant-..."
}
```
Updates `~/.config/fabric/.env` with new values.
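A minimal sketch of reading and updating configuration (the key value is a placeholder):
```bash
# Read the current configuration
curl http://localhost:8080/config
# Update one or more keys; the server writes them to ~/.config/fabric/.env
curl -X POST http://localhost:8080/config/update \
  -H "Content-Type: application/json" \
  -d '{"OPENAI_API_KEY": "sk-..."}'
```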
## Complete Workflow Examples
### Example: Summarize a YouTube Video
This example shows how to extract a YouTube transcript and summarize it using the `youtube_summary` pattern. This requires two API calls:
#### Step 1: Extract the transcript
```bash
curl -X POST http://localhost:8080/youtube/transcript \
-H "Content-Type: application/json" \
-d '{
"url": "https://youtube.com/watch?v=dQw4w9WgXcQ",
"timestamps": false
}' > transcript.json
```
Response:
```json
{
"videoId": "dQw4w9WgXcQ",
"title": "Rick Astley - Never Gonna Give You Up (Official Video)",
"description": "The official video for “Never Gonna Give You Up” by Rick Astley...",
"transcript": "We're no strangers to love. You know the rules and so do I..."
}
```
#### Step 2: Summarize the transcript
Extract the transcript text and send it to the chat endpoint with the `youtube_summary` pattern:
```bash
# Extract transcript text from JSON
TRANSCRIPT=$(jq -r '.transcript' transcript.json)
# Send to chat endpoint with pattern; jq -Rs escapes the transcript as a valid JSON string
curl -X POST http://localhost:8080/chat \
-H "Content-Type: application/json" \
-d "{
\"prompts\": [{
\"userInput\": $(echo "$TRANSCRIPT" | jq -Rs .),
\"vendor\": \"openai\",
\"model\": \"gpt-4o\",
\"patternName\": \"youtube_summary\"
}]
}"
```
#### Combined one-liner (using jq)
```bash
# Build the chat payload with jq so the transcript is safely escaped, then POST it from stdin
curl -s -X POST http://localhost:8080/youtube/transcript \
-H "Content-Type: application/json" \
-d '{"url": "https://youtube.com/watch?v=dQw4w9WgXcQ", "timestamps": false}' | \
jq '{prompts: [{userInput: .transcript, vendor: "openai", model: "gpt-4o", patternName: "youtube_summary"}]}' | \
curl -X POST http://localhost:8080/chat \
-H "Content-Type: application/json" \
-d @-
```
#### Alternative: Using a script
```bash
#!/bin/bash
YOUTUBE_URL="https://youtube.com/watch?v=dQw4w9WgXcQ"
API_BASE="http://localhost:8080"
# Step 1: Get transcript
echo "Extracting transcript..."
TRANSCRIPT=$(curl -s -X POST "$API_BASE/youtube/transcript" \
-H "Content-Type: application/json" \
-d "{\"url\":\"$YOUTUBE_URL\",\"timestamps\":false}" | jq -r '.transcript')
# Step 2: Summarize with pattern
echo "Generating summary..."
curl -X POST "$API_BASE/chat" \
-H "Content-Type: application/json" \
-d "{
\"prompts\": [{
\"userInput\": $(echo "$TRANSCRIPT" | jq -Rs .),
\"vendor\": \"openai\",
\"model\": \"gpt-4o\",
\"patternName\": \"youtube_summary\"
}]
}"
```
#### Comparison with CLI
The CLI combines these steps automatically:
```bash
# CLI version (single command)
fabric -y "https://youtube.com/watch?v=dQw4w9WgXcQ" --pattern youtube_summary
```
The API provides more flexibility by separating transcript extraction and summarization, allowing you to:
- Extract the transcript once and process it multiple ways
- Apply different patterns to the same transcript
- Store the transcript for later use
- Use different models or vendors for summarization
## Docker Usage
Run the server in Docker:
```bash
# Setup (first time)
mkdir -p $HOME/.fabric-config
docker run --rm -it \
-v $HOME/.fabric-config:/root/.config/fabric \
kayvan/fabric:latest --setup
# Start server
docker run --rm -it \
-p 8080:8080 \
-v $HOME/.fabric-config:/root/.config/fabric \
kayvan/fabric:latest --serve
# With authentication
docker run --rm -it \
-p 8080:8080 \
-v $HOME/.fabric-config:/root/.config/fabric \
kayvan/fabric:latest --serve --api-key my_secret_key
```
## Ollama Compatibility Mode
Fabric can emulate Ollama's API endpoints:
```bash
fabric --serveOllama --address :11434
```
This mode provides:
- `GET /api/tags` - Lists patterns as models
- `GET /api/version` - Server version
- `POST /api/chat` - Ollama-compatible chat endpoint
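For example, to confirm the emulated endpoints respond on the Ollama default port used above:
```bash
# Patterns are listed as "models"; version returns the server version
curl http://localhost:11434/api/tags
curl http://localhost:11434/api/version
```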
## Error Handling
All endpoints return standard HTTP status codes:
- `200 OK` - Success
- `400 Bad Request` - Invalid input
- `401 Unauthorized` - Missing or invalid API key
- `404 Not Found` - Resource not found
- `500 Internal Server Error` - Server error
Error responses include JSON with details:
```json
{
"error": "Pattern not found: nonexistent"
}
```
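When scripting against the API, you can surface the status code alongside the error body with curl's `--write-out` option (the pattern name is illustrative):
```bash
# Print the response body followed by the HTTP status code
curl -s -w '\n%{http_code}\n' http://localhost:8080/patterns/nonexistent
```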
## Rate Limiting
The server does not implement rate limiting. When deploying publicly, use a reverse proxy (nginx, Caddy) with rate limiting enabled.
## CORS
The server sets CORS headers for local development:
```http
Access-Control-Allow-Origin: http://localhost:5173
```
For production, configure CORS through a reverse proxy.

512
docs/swagger.json Normal file
View File

@@ -0,0 +1,512 @@
{
"swagger": "2.0",
"info": {
"description": "REST API for Fabric AI augmentation framework. Provides endpoints for chat completions, pattern management, contexts, sessions, and more.",
"title": "Fabric REST API",
"contact": {
"name": "Fabric Support",
"url": "https://github.com/danielmiessler/fabric"
},
"license": {
"name": "MIT",
"url": "https://opensource.org/licenses/MIT"
},
"version": "1.0"
},
"host": "localhost:8080",
"basePath": "/",
"paths": {
"/chat": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Stream AI responses using Server-Sent Events (SSE)",
"consumes": [
"application/json"
],
"produces": [
"text/event-stream"
],
"tags": [
"chat"
],
"summary": "Stream chat completions",
"parameters": [
{
"description": "Chat request with prompts and options",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.ChatRequest"
}
}
],
"responses": {
"200": {
"description": "Streaming response",
"schema": {
"$ref": "#/definitions/restapi.StreamResponse"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/models/names": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Get a list of all available AI models grouped by vendor",
"produces": [
"application/json"
],
"tags": [
"models"
],
"summary": "List all available models",
"responses": {
"200": {
"description": "Returns models (array) and vendors (map)",
"schema": {
"type": "object",
"additionalProperties": true
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/patterns/{name}": {
"get": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Retrieve a pattern by name",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"patterns"
],
"summary": "Get a pattern",
"parameters": [
{
"type": "string",
"description": "Pattern name",
"name": "name",
"in": "path",
"required": true
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/fsdb.Pattern"
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/patterns/{name}/apply": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Apply a pattern with variable substitution",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"patterns"
],
"summary": "Apply pattern with variables",
"parameters": [
{
"type": "string",
"description": "Pattern name",
"name": "name",
"in": "path",
"required": true
},
{
"description": "Pattern application request",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.PatternApplyRequest"
}
}
],
"responses": {
"200": {
"description": "OK",
"schema": {
"$ref": "#/definitions/fsdb.Pattern"
}
},
"400": {
"description": "Bad Request",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"500": {
"description": "Internal Server Error",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
},
"/youtube/transcript": {
"post": {
"security": [
{
"ApiKeyAuth": []
}
],
"description": "Retrieves the transcript of a YouTube video along with video metadata (title and description)",
"consumes": [
"application/json"
],
"produces": [
"application/json"
],
"tags": [
"youtube"
],
"summary": "Get YouTube video transcript",
"parameters": [
{
"description": "YouTube transcript request with URL, language, and timestamp options",
"name": "request",
"in": "body",
"required": true,
"schema": {
"$ref": "#/definitions/restapi.YouTubeRequest"
}
}
],
"responses": {
"200": {
"description": "Successful response with transcript and metadata",
"schema": {
"$ref": "#/definitions/restapi.YouTubeResponse"
}
},
"400": {
"description": "Bad request - invalid URL or playlist URL provided",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
},
"500": {
"description": "Internal server error - failed to retrieve transcript or metadata",
"schema": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
}
}
}
},
"definitions": {
"domain.ThinkingLevel": {
"type": "string",
"enum": [
"off",
"low",
"medium",
"high"
],
"x-enum-varnames": [
"ThinkingOff",
"ThinkingLow",
"ThinkingMedium",
"ThinkingHigh"
]
},
"fsdb.Pattern": {
"type": "object",
"properties": {
"description": {
"type": "string"
},
"name": {
"type": "string"
},
"pattern": {
"type": "string"
}
}
},
"restapi.ChatRequest": {
"type": "object",
"properties": {
"audioFormat": {
"type": "string"
},
"audioOutput": {
"type": "boolean"
},
"frequencyPenalty": {
"type": "number",
"format": "float64"
},
"imageBackground": {
"type": "string"
},
"imageCompression": {
"type": "integer"
},
"imageFile": {
"type": "string"
},
"imageQuality": {
"type": "string"
},
"imageSize": {
"type": "string"
},
"language": {
"description": "Add Language field to bind from request",
"type": "string"
},
"maxTokens": {
"type": "integer"
},
"model": {
"type": "string"
},
"modelContextLength": {
"type": "integer"
},
"notification": {
"type": "boolean"
},
"notificationCommand": {
"type": "string"
},
"presencePenalty": {
"type": "number",
"format": "float64"
},
"prompts": {
"type": "array",
"items": {
"$ref": "#/definitions/restapi.PromptRequest"
}
},
"raw": {
"type": "boolean"
},
"search": {
"type": "boolean"
},
"searchLocation": {
"type": "string"
},
"seed": {
"type": "integer"
},
"suppressThink": {
"type": "boolean"
},
"temperature": {
"type": "number",
"format": "float64"
},
"thinkEndTag": {
"type": "string"
},
"thinkStartTag": {
"type": "string"
},
"thinking": {
"$ref": "#/definitions/domain.ThinkingLevel"
},
"topP": {
"type": "number",
"format": "float64"
},
"voice": {
"type": "string"
}
}
},
"restapi.PatternApplyRequest": {
"type": "object",
"properties": {
"input": {
"type": "string"
},
"variables": {
"type": "object",
"additionalProperties": {
"type": "string"
}
}
}
},
"restapi.PromptRequest": {
"type": "object",
"properties": {
"contextName": {
"type": "string"
},
"model": {
"type": "string"
},
"patternName": {
"type": "string"
},
"strategyName": {
"description": "Optional strategy name",
"type": "string"
},
"userInput": {
"type": "string"
},
"variables": {
"description": "Pattern variables",
"type": "object",
"additionalProperties": {
"type": "string"
}
},
"vendor": {
"type": "string"
}
}
},
"restapi.StreamResponse": {
"type": "object",
"properties": {
"content": {
"description": "The actual content",
"type": "string"
},
"format": {
"description": "\"markdown\", \"mermaid\", \"plain\"",
"type": "string"
},
"type": {
"description": "\"content\", \"error\", \"complete\"",
"type": "string"
}
}
},
"restapi.YouTubeRequest": {
"type": "object",
"required": [
"url"
],
"properties": {
"language": {
"description": "Language code for transcript (default: \"en\")",
"type": "string",
"example": "en"
},
"timestamps": {
"description": "Include timestamps in the transcript (default: false)",
"type": "boolean",
"example": false
},
"url": {
"description": "YouTube video URL (required)",
"type": "string",
"example": "https://www.youtube.com/watch?v=dQw4w9WgXcQ"
}
}
},
"restapi.YouTubeResponse": {
"type": "object",
"properties": {
"description": {
"description": "Video description from YouTube metadata",
"type": "string",
"example": "This is the video description from YouTube..."
},
"title": {
"description": "Video title from YouTube metadata",
"type": "string",
"example": "Example Video Title"
},
"transcript": {
"description": "The video transcript text",
"type": "string",
"example": "This is the video transcript..."
},
"videoId": {
"description": "YouTube video ID",
"type": "string",
"example": "dQw4w9WgXcQ"
}
}
}
},
"securityDefinitions": {
"ApiKeyAuth": {
"type": "apiKey",
"name": "X-API-Key",
"in": "header"
}
}
}

344
docs/swagger.yaml Normal file
View File

@@ -0,0 +1,344 @@
basePath: /
definitions:
domain.ThinkingLevel:
enum:
- "off"
- low
- medium
- high
type: string
x-enum-varnames:
- ThinkingOff
- ThinkingLow
- ThinkingMedium
- ThinkingHigh
fsdb.Pattern:
properties:
description:
type: string
name:
type: string
pattern:
type: string
type: object
restapi.ChatRequest:
properties:
audioFormat:
type: string
audioOutput:
type: boolean
frequencyPenalty:
format: float64
type: number
imageBackground:
type: string
imageCompression:
type: integer
imageFile:
type: string
imageQuality:
type: string
imageSize:
type: string
language:
description: Add Language field to bind from request
type: string
maxTokens:
type: integer
model:
type: string
modelContextLength:
type: integer
notification:
type: boolean
notificationCommand:
type: string
presencePenalty:
format: float64
type: number
prompts:
items:
$ref: '#/definitions/restapi.PromptRequest'
type: array
raw:
type: boolean
search:
type: boolean
searchLocation:
type: string
seed:
type: integer
suppressThink:
type: boolean
temperature:
format: float64
type: number
thinkEndTag:
type: string
thinkStartTag:
type: string
thinking:
$ref: '#/definitions/domain.ThinkingLevel'
topP:
format: float64
type: number
voice:
type: string
type: object
restapi.PatternApplyRequest:
properties:
input:
type: string
variables:
additionalProperties:
type: string
type: object
type: object
restapi.PromptRequest:
properties:
contextName:
type: string
model:
type: string
patternName:
type: string
strategyName:
description: Optional strategy name
type: string
userInput:
type: string
variables:
additionalProperties:
type: string
description: Pattern variables
type: object
vendor:
type: string
type: object
restapi.StreamResponse:
properties:
content:
description: The actual content
type: string
format:
description: '"markdown", "mermaid", "plain"'
type: string
type:
description: '"content", "error", "complete"'
type: string
type: object
restapi.YouTubeRequest:
properties:
language:
description: 'Language code for transcript (default: "en")'
example: en
type: string
timestamps:
description: 'Include timestamps in the transcript (default: false)'
example: false
type: boolean
url:
description: YouTube video URL (required)
example: https://www.youtube.com/watch?v=dQw4w9WgXcQ
type: string
required:
- url
type: object
restapi.YouTubeResponse:
properties:
description:
description: Video description from YouTube metadata
example: This is the video description from YouTube...
type: string
title:
description: Video title from YouTube metadata
example: Example Video Title
type: string
transcript:
description: The video transcript text
example: This is the video transcript...
type: string
videoId:
description: YouTube video ID
example: dQw4w9WgXcQ
type: string
type: object
host: localhost:8080
info:
contact:
name: Fabric Support
url: https://github.com/danielmiessler/fabric
description: REST API for Fabric AI augmentation framework. Provides endpoints for
chat completions, pattern management, contexts, sessions, and more.
license:
name: MIT
url: https://opensource.org/licenses/MIT
title: Fabric REST API
version: "1.0"
paths:
/chat:
post:
consumes:
- application/json
description: Stream AI responses using Server-Sent Events (SSE)
parameters:
- description: Chat request with prompts and options
in: body
name: request
required: true
schema:
$ref: '#/definitions/restapi.ChatRequest'
produces:
- text/event-stream
responses:
"200":
description: Streaming response
schema:
$ref: '#/definitions/restapi.StreamResponse'
"400":
description: Bad Request
schema:
additionalProperties:
type: string
type: object
security:
- ApiKeyAuth: []
summary: Stream chat completions
tags:
- chat
/models/names:
get:
description: Get a list of all available AI models grouped by vendor
produces:
- application/json
responses:
"200":
description: Returns models (array) and vendors (map)
schema:
additionalProperties: true
type: object
"500":
description: Internal Server Error
schema:
additionalProperties:
type: string
type: object
security:
- ApiKeyAuth: []
summary: List all available models
tags:
- models
/patterns/{name}:
get:
consumes:
- application/json
description: Retrieve a pattern by name
parameters:
- description: Pattern name
in: path
name: name
required: true
type: string
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/fsdb.Pattern'
"500":
description: Internal Server Error
schema:
additionalProperties:
type: string
type: object
security:
- ApiKeyAuth: []
summary: Get a pattern
tags:
- patterns
/patterns/{name}/apply:
post:
consumes:
- application/json
description: Apply a pattern with variable substitution
parameters:
- description: Pattern name
in: path
name: name
required: true
type: string
- description: Pattern application request
in: body
name: request
required: true
schema:
$ref: '#/definitions/restapi.PatternApplyRequest'
produces:
- application/json
responses:
"200":
description: OK
schema:
$ref: '#/definitions/fsdb.Pattern'
"400":
description: Bad Request
schema:
additionalProperties:
type: string
type: object
"500":
description: Internal Server Error
schema:
additionalProperties:
type: string
type: object
security:
- ApiKeyAuth: []
summary: Apply pattern with variables
tags:
- patterns
/youtube/transcript:
post:
consumes:
- application/json
description: Retrieves the transcript of a YouTube video along with video metadata
(title and description)
parameters:
- description: YouTube transcript request with URL, language, and timestamp
options
in: body
name: request
required: true
schema:
$ref: '#/definitions/restapi.YouTubeRequest'
produces:
- application/json
responses:
"200":
description: Successful response with transcript and metadata
schema:
$ref: '#/definitions/restapi.YouTubeResponse'
"400":
description: Bad request - invalid URL or playlist URL provided
schema:
additionalProperties:
type: string
type: object
"500":
description: Internal server error - failed to retrieve transcript or metadata
schema:
additionalProperties:
type: string
type: object
security:
- ApiKeyAuth: []
summary: Get YouTube video transcript
tags:
- youtube
securityDefinitions:
ApiKeyAuth:
in: header
name: X-API-Key
type: apiKey
swagger: "2.0"

24
flake.lock generated
View File

@@ -5,11 +5,11 @@
"systems": "systems"
},
"locked": {
"lastModified": 1694529238,
"narHash": "sha256-zsNZZGTGnMOf9YpHKJqMSsa0dXbfmxeoJ7xHlrt+xmY=",
"lastModified": 1731533236,
"narHash": "sha256-l0KFg5HjrsfsO/JpG+r7fRrqm12kzFHyUHqHCVpMMbI=",
"owner": "numtide",
"repo": "flake-utils",
"rev": "ff7b65b44d01cf9ba6a71320833626af21126384",
"rev": "11707dc2f618dd54ca8739b309ec4fc024de578b",
"type": "github"
},
"original": {
@@ -26,11 +26,11 @@
]
},
"locked": {
"lastModified": 1742209644,
"narHash": "sha256-jMy1XqXqD0/tJprEbUmKilTkvbDY/C0ZGSsJJH4TNCE=",
"lastModified": 1763982521,
"narHash": "sha256-ur4QIAHwgFc0vXiaxn5No/FuZicxBr2p0gmT54xZkUQ=",
"owner": "nix-community",
"repo": "gomod2nix",
"rev": "8f3534eb8f6c5c3fce799376dc3b91bae6b11884",
"rev": "02e63a239d6eabd595db56852535992c898eba72",
"type": "github"
},
"original": {
@@ -41,11 +41,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1745234285,
"narHash": "sha256-GfpyMzxwkfgRVN0cTGQSkTC0OHhEkv3Jf6Tcjm//qZ0=",
"lastModified": 1765472234,
"narHash": "sha256-9VvC20PJPsleGMewwcWYKGzDIyjckEz8uWmT0vCDYK0=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "c11863f1e964833214b767f4a369c6e6a7aba141",
"rev": "2fbfb1d73d239d2402a8fe03963e37aab15abe8b",
"type": "github"
},
"original": {
@@ -100,11 +100,11 @@
]
},
"locked": {
"lastModified": 1744961264,
"narHash": "sha256-aRmUh0AMwcbdjJHnytg1e5h5ECcaWtIFQa6d9gI85AI=",
"lastModified": 1762938485,
"narHash": "sha256-AlEObg0syDl+Spi4LsZIBrjw+snSVU4T8MOeuZJUJjM=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "8d404a69efe76146368885110f29a2ca3700bee6",
"rev": "5b4ee75aeefd1e2d5a1cc43cf6ba65eba75e83e4",
"type": "github"
},
"original": {

54
go.mod
View File

@@ -3,14 +3,14 @@ module github.com/danielmiessler/fabric
go 1.25.1
require (
github.com/anthropics/anthropic-sdk-go v1.16.0
github.com/anthropics/anthropic-sdk-go v1.19.0
github.com/atotto/clipboard v0.1.4
github.com/aws/aws-sdk-go-v2 v1.39.0
github.com/aws/aws-sdk-go-v2/config v1.31.8
github.com/aws/aws-sdk-go-v2/service/bedrock v1.46.1
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.40.1
github.com/gabriel-vasile/mimetype v1.4.9
github.com/gin-gonic/gin v1.10.1
github.com/gabriel-vasile/mimetype v1.4.12
github.com/gin-gonic/gin v1.11.0
github.com/go-git/go-git/v5 v5.16.2
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612
github.com/google/go-github/v66 v66.0.0
@@ -28,8 +28,11 @@ require (
github.com/sgaunet/perplexity-go/v2 v2.8.0
github.com/spf13/cobra v1.9.1
github.com/stretchr/testify v1.11.1
github.com/swaggo/files v1.0.1
github.com/swaggo/gin-swagger v1.6.1
github.com/swaggo/swag v1.16.6
golang.org/x/oauth2 v0.30.0
golang.org/x/text v0.28.0
golang.org/x/text v0.32.0
google.golang.org/api v0.247.0
gopkg.in/yaml.v3 v3.0.1
)
@@ -37,8 +40,27 @@ require (
require (
github.com/Azure/azure-sdk-for-go/sdk/azcore v1.19.1 // indirect
github.com/Azure/azure-sdk-for-go/sdk/internal v1.11.2 // indirect
github.com/KyleBanks/depth v1.2.1 // indirect
github.com/bytedance/gopkg v0.1.3 // indirect
github.com/go-openapi/jsonpointer v0.22.4 // indirect
github.com/go-openapi/jsonreference v0.21.4 // indirect
github.com/go-openapi/spec v0.22.2 // indirect
github.com/go-openapi/swag/conv v0.25.4 // indirect
github.com/go-openapi/swag/jsonname v0.25.4 // indirect
github.com/go-openapi/swag/jsonutils v0.25.4 // indirect
github.com/go-openapi/swag/loading v0.25.4 // indirect
github.com/go-openapi/swag/stringutils v0.25.4 // indirect
github.com/go-openapi/swag/typeutils v0.25.4 // indirect
github.com/go-openapi/swag/yamlutils v0.25.4 // indirect
github.com/goccy/go-yaml v1.19.1 // indirect
github.com/google/go-cmp v0.7.0 // indirect
github.com/gorilla/websocket v1.5.3 // indirect
github.com/quic-go/qpack v0.6.0 // indirect
github.com/quic-go/quic-go v0.57.1 // indirect
go.uber.org/mock v0.6.0 // indirect
go.yaml.in/yaml/v3 v3.0.4 // indirect
golang.org/x/mod v0.31.0 // indirect
golang.org/x/tools v0.40.0 // indirect
)
require (
@@ -63,10 +85,10 @@ require (
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.34.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 // indirect
github.com/aws/smithy-go v1.23.0 // indirect
github.com/bytedance/sonic v1.13.3 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/bytedance/sonic v1.14.2 // indirect
github.com/bytedance/sonic/loader v0.4.0 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/cloudwego/base64x v0.1.6 // indirect
github.com/coder/websocket v1.8.13 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.2-0.20180830191138-d8f796af33cc // indirect
@@ -79,7 +101,7 @@ require (
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-playground/validator/v10 v10.29.0 // indirect
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f // indirect
@@ -93,7 +115,7 @@ require (
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
github.com/klauspost/cpuid/v2 v2.2.10 // indirect
github.com/klauspost/cpuid/v2 v2.3.0 // indirect
github.com/leodido/go-urn v1.4.0 // indirect
github.com/mattn/go-isatty v0.0.20 // indirect
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
@@ -110,23 +132,23 @@ require (
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.14 // indirect
github.com/ugorji/go/codec v1.3.1 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
golang.org/x/arch v0.18.0 // indirect
golang.org/x/crypto v0.41.0 // indirect
golang.org/x/arch v0.23.0 // indirect
golang.org/x/crypto v0.46.0 // indirect
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b // indirect
golang.org/x/net v0.43.0 // indirect
golang.org/x/sync v0.16.0 // indirect
golang.org/x/sys v0.35.0 // indirect
golang.org/x/net v0.48.0 // indirect
golang.org/x/sync v0.19.0 // indirect
golang.org/x/sys v0.39.0 // indirect
google.golang.org/genai v1.17.0
google.golang.org/genproto/googleapis/api v0.0.0-20250818200422-3122310a409c // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c // indirect
google.golang.org/grpc v1.74.2 // indirect
google.golang.org/protobuf v1.36.7 // indirect
google.golang.org/protobuf v1.36.11 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
)

133
go.sum
View File

@@ -18,6 +18,8 @@ github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2 h1:oygO0locgZJ
github.com/AzureAD/microsoft-authentication-library-for-go v1.4.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
github.com/BurntSushi/toml v1.5.0 h1:W5quZX/G/csjUnuI8SUYlsHs9M38FC7znL0lIO+DvMg=
github.com/BurntSushi/toml v1.5.0/go.mod h1:ukJfTF/6rtPPRCnwkur4qwRxa8vTRFBF0uk2lLoLwho=
github.com/KyleBanks/depth v1.2.1 h1:5h8fQADFrWtarTdtDudMmGsC7GPbOAu6RVB3ffsVFHc=
github.com/KyleBanks/depth v1.2.1/go.mod h1:jzSb9d0L43HxTQfT+oSA1EEp2q+ne2uh6XgeJcm8brE=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
@@ -27,8 +29,8 @@ github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kk
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/anthropics/anthropic-sdk-go v1.16.0 h1:nRkOFDqYXsHteoIhjdJr/5dsiKbFF3rflSv8ax50y8o=
github.com/anthropics/anthropic-sdk-go v1.16.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
github.com/anthropics/anthropic-sdk-go v1.19.0 h1:mO6E+ffSzLRvR/YUH9KJC0uGw0uV8GjISIuzem//3KE=
github.com/anthropics/anthropic-sdk-go v1.19.0/go.mod h1:WTz31rIUHUHqai2UslPpw5CwXrQP3geYBioRV4WOLvE=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
@@ -67,16 +69,16 @@ github.com/aws/aws-sdk-go-v2/service/sts v1.38.4 h1:PR00NXRYgY4FWHqOGx3fC3lhVKjs
github.com/aws/aws-sdk-go-v2/service/sts v1.38.4/go.mod h1:Z+Gd23v97pX9zK97+tX4ppAgqCt3Z2dIXB02CtBncK8=
github.com/aws/smithy-go v1.23.0 h1:8n6I3gXzWJB2DxBDnfxgBaSX6oe0d/t10qGz7OKqMCE=
github.com/aws/smithy-go v1.23.0/go.mod h1:t1ufH5HMublsJYulve2RKmHDC15xu1f26kHCp/HgceI=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/bytedance/gopkg v0.1.3 h1:TPBSwH8RsouGCBcMBktLt1AymVo2TVsBVCY4b6TnZ/M=
github.com/bytedance/gopkg v0.1.3/go.mod h1:576VvJ+eJgyCzdjS+c4+77QF3p7ubbtiKARP3TxducM=
github.com/bytedance/sonic v1.14.2 h1:k1twIoe97C1DtYUo+fZQy865IuHia4PR5RPiuGPPIIE=
github.com/bytedance/sonic v1.14.2/go.mod h1:T80iDELeHiHKSc0C9tubFygiuXoGzrkjKzX2quAx980=
github.com/bytedance/sonic/loader v0.4.0 h1:olZ7lEqcxtZygCK9EKYKADnpQoYkRQxaeY2NYzevs+o=
github.com/bytedance/sonic/loader v0.4.0/go.mod h1:AR4NYCk5DdzZizZ5djGqQ92eEhCCcdf5x77udYiSJRo=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cloudwego/base64x v0.1.6 h1:t11wG9AECkCDk5fMSoxmufanudBtJ+/HemLstXDLI2M=
github.com/cloudwego/base64x v0.1.6/go.mod h1:OFcloc187FXDaYHvrNIjxSe8ncn0OOM8gEHfghB2IPU=
github.com/coder/websocket v1.8.13 h1:f3QZdXy7uGVz+4uCJy2nTZyM0yTBj8yANEHhqlXZ9FE=
github.com/coder/websocket v1.8.13/go.mod h1:LNVeNrXQZfe5qhS9ALED3uA+l5pPqvwXg3CKoDBB2gs=
github.com/cpuguy83/go-md2man/v2 v2.0.6/go.mod h1:oOW0eioCTA6cOiMLiUPZOpcVxMig6NIQQ7OS05n1F4g=
@@ -92,12 +94,14 @@ github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gabriel-vasile/mimetype v1.4.12 h1:e9hWvmLYvtp846tLHam2o++qitpguFiYCKbn0w9jyqw=
github.com/gabriel-vasile/mimetype v1.4.12/go.mod h1:d+9Oxyo1wTzWdyVUPMmXFvp4F9tea18J8ufA774AB3s=
github.com/gin-contrib/gzip v0.0.6 h1:NjcunTcGAj5CO1gn4N8jHOSIeRFHIbn51z6K+xaN4d4=
github.com/gin-contrib/gzip v0.0.6/go.mod h1:QOJlmV2xmayAjkNS2Y8NQsMneuRShOU/kjovCXNuzzk=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gin-gonic/gin v1.11.0 h1:OW/6PLjyusp2PPXtyxKHU0RbX6I/l28FTdDlae5ueWk=
github.com/gin-gonic/gin v1.11.0/go.mod h1:+iq/FyxlGzII0KHiBGjuNn4UNENUlKbGlNmc+W50Dls=
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
@@ -113,20 +117,49 @@ github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-openapi/jsonpointer v0.22.4 h1:dZtK82WlNpVLDW2jlA1YCiVJFVqkED1MegOUy9kR5T4=
github.com/go-openapi/jsonpointer v0.22.4/go.mod h1:elX9+UgznpFhgBuaMQ7iu4lvvX1nvNsesQ3oxmYTw80=
github.com/go-openapi/jsonreference v0.21.4 h1:24qaE2y9bx/q3uRK/qN+TDwbok1NhbSmGjjySRCHtC8=
github.com/go-openapi/jsonreference v0.21.4/go.mod h1:rIENPTjDbLpzQmQWCj5kKj3ZlmEh+EFVbz3RTUh30/4=
github.com/go-openapi/spec v0.22.2 h1:KEU4Fb+Lp1qg0V4MxrSCPv403ZjBl8Lx1a83gIPU8Qc=
github.com/go-openapi/spec v0.22.2/go.mod h1:iIImLODL2loCh3Vnox8TY2YWYJZjMAKYyLH2Mu8lOZs=
github.com/go-openapi/swag v0.19.15 h1:D2NRCBzS9/pEY3gP9Nl8aDqGUcPFrwG2p+CNFrLyrCM=
github.com/go-openapi/swag/conv v0.25.4 h1:/Dd7p0LZXczgUcC/Ikm1+YqVzkEeCc9LnOWjfkpkfe4=
github.com/go-openapi/swag/conv v0.25.4/go.mod h1:3LXfie/lwoAv0NHoEuY1hjoFAYkvlqI/Bn5EQDD3PPU=
github.com/go-openapi/swag/jsonname v0.25.4 h1:bZH0+MsS03MbnwBXYhuTttMOqk+5KcQ9869Vye1bNHI=
github.com/go-openapi/swag/jsonname v0.25.4/go.mod h1:GPVEk9CWVhNvWhZgrnvRA6utbAltopbKwDu8mXNUMag=
github.com/go-openapi/swag/jsonutils v0.25.4 h1:VSchfbGhD4UTf4vCdR2F4TLBdLwHyUDTd1/q4i+jGZA=
github.com/go-openapi/swag/jsonutils v0.25.4/go.mod h1:7OYGXpvVFPn4PpaSdPHJBtF0iGnbEaTk8AvBkoWnaAY=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4 h1:IACsSvBhiNJwlDix7wq39SS2Fh7lUOCJRmx/4SN4sVo=
github.com/go-openapi/swag/jsonutils/fixtures_test v0.25.4/go.mod h1:Mt0Ost9l3cUzVv4OEZG+WSeoHwjWLnarzMePNDAOBiM=
github.com/go-openapi/swag/loading v0.25.4 h1:jN4MvLj0X6yhCDduRsxDDw1aHe+ZWoLjW+9ZQWIKn2s=
github.com/go-openapi/swag/loading v0.25.4/go.mod h1:rpUM1ZiyEP9+mNLIQUdMiD7dCETXvkkC30z53i+ftTE=
github.com/go-openapi/swag/stringutils v0.25.4 h1:O6dU1Rd8bej4HPA3/CLPciNBBDwZj9HiEpdVsb8B5A8=
github.com/go-openapi/swag/stringutils v0.25.4/go.mod h1:GTsRvhJW5xM5gkgiFe0fV3PUlFm0dr8vki6/VSRaZK0=
github.com/go-openapi/swag/typeutils v0.25.4 h1:1/fbZOUN472NTc39zpa+YGHn3jzHWhv42wAJSN91wRw=
github.com/go-openapi/swag/typeutils v0.25.4/go.mod h1:Ou7g//Wx8tTLS9vG0UmzfCsjZjKhpjxayRKTHXf2pTE=
github.com/go-openapi/swag/yamlutils v0.25.4 h1:6jdaeSItEUb7ioS9lFoCZ65Cne1/RZtPBZ9A56h92Sw=
github.com/go-openapi/swag/yamlutils v0.25.4/go.mod h1:MNzq1ulQu+yd8Kl7wPOut/YHAAU/H6hL91fF+E2RFwc=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2 h1:0+Y41Pz1NkbTHz8NngxTuAXxEodtNSI1WG1c/m5Akw4=
github.com/go-openapi/testify/enable/yaml/v2 v2.0.2/go.mod h1:kme83333GCtJQHXQ8UKX3IBZu6z8T5Dvy5+CW3NLUUg=
github.com/go-openapi/testify/v2 v2.0.2 h1:X999g3jeLcoY8qctY/c/Z8iBHTbwLz7R2WXd6Ub6wls=
github.com/go-openapi/testify/v2 v2.0.2/go.mod h1:HCPmvFFnheKK2BuwSA0TbbdxJ3I16pjwMkYkP4Ywn54=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
github.com/go-playground/assert/v2 v2.2.0/go.mod h1:VDjEfimB/XKnb+ZQfWdccd7VUvScMdVu0Titje2rxJ4=
github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/oXslEjJA=
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-playground/validator/v10 v10.29.0 h1:lQlF5VNJWNlRbRZNeOIkWElR+1LL/OuHcc0Kp14w1xk=
github.com/go-playground/validator/v10 v10.29.0/go.mod h1:D6QxqeMlgIPuT02L66f2ccrZ7AGgHkzKmmTMZhk/Kc4=
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c h1:wpkoddUomPfHiOziHZixGO5ZBS73cKqVzZipfrLmO1w=
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c/go.mod h1:oVDCh3qjJMLVUSILBRwrm+Bc6RNXGZYtoh9xdvf1ffM=
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612 h1:BYLNYdZaepitbZreRIa9xeCQZocWmy/wj4cGIH0qyw0=
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612/go.mod h1:wgqthQa8SAYs0yyljVeCOQlZ027VW5CmLsbi9jWC08c=
github.com/goccy/go-json v0.10.5 h1:Fq85nIqj+gXn/S5ahsiTlK3TmC85qgirsdTP/+DeaC4=
github.com/goccy/go-json v0.10.5/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/goccy/go-yaml v1.19.1 h1:3rG3+v8pkhRqoQ/88NYNMHYVGYztCOCIZ7UQhu7H+NE=
github.com/goccy/go-yaml v1.19.1/go.mod h1:XBurs7gK8ATbW4ZPGKgcbrY1Br56PdM69F7LkFRi1kA=
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f h1:3BSP1Tbs2djlpprl7wCLuiqMaUh5SJkkzI2gDs+FgLs=
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f/go.mod h1:Pcatq5tYkCW2Q6yrR2VRHlbHpZ/R4/7qyL1TCF7vl14=
github.com/golang-jwt/jwt/v5 v5.2.2 h1:Rl4B7itRWVtYIHFrSNd7vhTiz9UpLdi6gZhZ3wEeDy8=
@@ -170,10 +203,8 @@ github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51 h1:Z9n2FFNU
github.com/kballard/go-shellquote v0.0.0-20180428030007-95032a82bc51/go.mod h1:CzGEWj7cYgsdH8dAjBGEr58BoE7ScuLd+fwFZ44+/x8=
github.com/kevinburke/ssh_config v1.2.0 h1:x584FjTGwHzMwvHx18PXxbBVzfnxogHaAReU4gf13a4=
github.com/kevinburke/ssh_config v1.2.0/go.mod h1:CT57kijsi8u/K/BOFA39wgDQJ9CxiF4nAY/ojJ6r6mM=
github.com/klauspost/cpuid/v2 v2.0.9/go.mod h1:FInQzS24/EEf25PyTYn52gqo7WaD8xa0213Md/qVLRg=
github.com/klauspost/cpuid/v2 v2.2.10 h1:tBs3QSyvjDyFTq3uoc/9xFpCuOsJQFNPiAhYdw2skhE=
github.com/klauspost/cpuid/v2 v2.2.10/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/knz/go-libedit v1.10.1/go.mod h1:MZTVkCWyz0oBc7JOWP3wNAzd002ZbM/5hgShxwh4x8M=
github.com/klauspost/cpuid/v2 v2.3.0 h1:S4CRMLnYUhGeDFDqkGriYKdfoFlDnMtqTiI/sFzhA9Y=
github.com/klauspost/cpuid/v2 v2.3.0/go.mod h1:hqwkgyIinND0mEev00jJYCxPNVRVXFQeu1XKlok6oO0=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.1 h1:flRD4NNwYAUpkphVc1HcthR4KEIFJ65n8Mw5qdRn3LE=
github.com/kr/pretty v0.3.1/go.mod h1:hoEshYVHaxMs3cyo3Yncou5ZscifuDolrwPKZanG3xk=
@@ -218,6 +249,10 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2 h1:Jamvg5psRIccs7FGNTlIRMkT8wgtp5eCXdBlqhYGL6U=
github.com/pmezard/go-difflib v1.0.1-0.20181226105442-5d4384ee4fb2/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/quic-go/qpack v0.6.0 h1:g7W+BMYynC1LbYLSqRt8PBg5Tgwxn214ZZR34VIOjz8=
github.com/quic-go/qpack v0.6.0/go.mod h1:lUpLKChi8njB4ty2bFLX2x4gzDqXwUpaO1DP9qMDZII=
github.com/quic-go/quic-go v0.57.1 h1:25KAAR9QR8KZrCZRThWMKVAwGoiHIrNbT72ULHTuI10=
github.com/quic-go/quic-go v0.57.1/go.mod h1:ly4QBAjHA2VhdnxhojRsCUOeJwKYg+taDlos92xb1+s=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
@@ -239,15 +274,23 @@ github.com/spf13/pflag v1.0.6/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.11.1 h1:7s2iGBzp5EwR7/aIZr8ao5+dra3wiQyKjjFuvgVKu7U=
github.com/stretchr/testify v1.11.1/go.mod h1:wZwfW3scLgRK+23gO65QZefKpKQRnfz6sD981Nm4B6U=
github.com/swaggo/files v1.0.1 h1:J1bVJ4XHZNq0I46UU90611i9/YzdrF7x92oX1ig5IdE=
github.com/swaggo/files v1.0.1/go.mod h1:0qXmMNH6sXNf+73t65aKeB+ApmgxdnkQzVTAj2uaMUg=
github.com/swaggo/gin-swagger v1.6.1 h1:Ri06G4gc9N4t4k8hekMigJ9zKTFSlqj/9paAQCQs7cY=
github.com/swaggo/gin-swagger v1.6.1/go.mod h1:LQ+hJStHakCWRiK/YNYtJOu4mR2FP+pxLnILT/qNiTw=
github.com/swaggo/swag v1.16.6 h1:qBNcx53ZaX+M5dxVyTrgQ0PJ/ACK+NzhwcbieTt+9yI=
github.com/swaggo/swag v1.16.6/go.mod h1:ngP2etMK5a0P3QBizic5MEwpRmluJZPHjXcMoj4Xesg=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.18.0 h1:FIDeeyB800efLX89e5a8Y0BNH+LOngJyGrIWxG2FKQY=
github.com/tidwall/gjson v1.18.0/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -260,8 +303,8 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.14 h1:yOQvXCBc3Ij46LRkRoh4Yd5qK6LVOgi0bYOXfb7ifjw=
github.com/ugorji/go/codec v1.2.14/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.3.1 h1:waO7eEiFDwidsBN6agj1vJQ4AG7lh2yqXyOXqhgQuyY=
github.com/ugorji/go/codec v1.3.1/go.mod h1:pRBVtBSKl77K30Bv8R2P+cLSGaTtex6fsA2Wjqmfxj4=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
@@ -279,8 +322,12 @@ go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFw
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc=
golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
go.uber.org/mock v0.6.0 h1:hyF9dfmbgIX5EfOdasqLsWD6xqpNZlXblLB/Dbnwv3Y=
go.uber.org/mock v0.6.0/go.mod h1:KiVJ4BqZJaMj4svdfmHM0AUx4NJYO8ZNpPnZn1Z+BBU=
go.yaml.in/yaml/v3 v3.0.4 h1:tfq32ie2Jv2UxXFdLJdh3jXuOzWiL1fo0bu/FbuKpbc=
go.yaml.in/yaml/v3 v3.0.4/go.mod h1:DhzuOOF2ATzADvBadXxruRBLzYTpT36CKvDb3+aBEFg=
golang.org/x/arch v0.23.0 h1:lKF64A2jF6Zd8L0knGltUnegD62JMFBiCPBmQpToHhg=
golang.org/x/arch v0.23.0/go.mod h1:dNHoOeKiyja7GTvF9NJS1l3Z2yntpQNzgrjh1cU103A=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -288,8 +335,8 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.41.0 h1:WKYxWedPGCTVVl5+WHSSrOBT0O8lx32+zxmHxijgXp4=
golang.org/x/crypto v0.41.0/go.mod h1:pO5AFd7FA68rFak7rOAGVuygIISepHftHnr8dr6+sUc=
golang.org/x/crypto v0.46.0 h1:cKRW/pmt1pKAfetfu+RCEvjvZkA9RimPbh7bhFjGVBU=
golang.org/x/crypto v0.46.0/go.mod h1:Evb/oLKmMraqjZ2iQTwDwvCtJkczlDuTmdJXoZVzqU0=
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b h1:QoALfVG9rhQ/M7vYDScfPdWjGL9dlsVVM5VGh7aKoAA=
golang.org/x/exp v0.0.0-20250531010427-b6e5de432a8b/go.mod h1:U6Lno4MTRCDY+Ba7aCcauB9T60gsv5s4ralQzP72ZoQ=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
@@ -297,18 +344,21 @@ golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.31.0 h1:HaW9xtz0+kOcWKwli0ZXy79Ix+UW/vOfmWI5QVd2tgI=
golang.org/x/mod v0.31.0/go.mod h1:43JraMp9cGx1Rx3AqioxrbrhNsLl2l/iNAvuBkrezpg=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.7.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.43.0 h1:lat02VYK2j4aLzMzecihNvTlJNQUq316m2Mr9rnM6YE=
golang.org/x/net v0.43.0/go.mod h1:vhO1fvI4dGsIjh73sWfUVjj3N7CA9WkKJNQm2svM6Jg=
golang.org/x/net v0.48.0 h1:zyQRTTrjc33Lhh0fBgT/H3oZq9WuvRR5gPC70xpDiQU=
golang.org/x/net v0.48.0/go.mod h1:+ndRgGjkh8FGtu1w1FGbEC31if4VrNVMuKTgcAAnQRY=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -318,8 +368,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.16.0 h1:ycBJEhp9p4vXvUZNszeOq0kGTPghopOL8q0fq3vstxw=
golang.org/x/sync v0.16.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.19.0 h1:vV+1eWNmZ5geRlYjzm2adRgW2/mcpevXNg50YZtPCE4=
golang.org/x/sync v0.19.0/go.mod h1:9KTHXmSnoGruLpwFjVSX0lNNA75CykiMECbovNTZqGI=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -336,8 +386,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.35.0 h1:vz1N37gP5bs89s7He8XuIYXpyY0+QlsKmzipCbUtyxI=
golang.org/x/sys v0.35.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.39.0 h1:CvCKL8MeisomCi6qNZ+wbb0DN9E5AATixKsvNtMoMFk=
golang.org/x/sys v0.39.0/go.mod h1:OgkHotnGiDImocRcuBABYBEXf8A9a87e/uXjp9XT3ks=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -347,8 +397,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.34.0 h1:O/2T7POpk0ZZ7MAzMeWFSg6S5IpWd/RXDlM9hgM3DR4=
golang.org/x/term v0.34.0/go.mod h1:5jC53AEywhIVebHgPVeg0mj8OD3VO9OzclacVrqpaAw=
golang.org/x/term v0.38.0 h1:PQ5pkm/rLO6HnxFR7N2lJHOZX6Kez5Y1gDSJla6jo7Q=
golang.org/x/term v0.38.0/go.mod h1:bSEAKrOT1W+VSu9TSCMtoGEOUcKxOKgl3LE5QEF/xVg=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -359,14 +409,18 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.28.0 h1:rhazDwis8INMIwQ4tpjLDzUhx6RlXqZNPEM0huQojng=
golang.org/x/text v0.28.0/go.mod h1:U8nCwOR8jO/marOQ0QbDiOngZVEBB7MAiitBuMjXiNU=
golang.org/x/text v0.32.0 h1:ZD01bjUt1FQ9WJ0ClOL5vxgxOI/sVCNgX1YtKwcY0mU=
golang.org/x/text v0.32.0/go.mod h1:o/rUWzghvpD5TXrTIBuJU77MTaN0ljMWE47kxGJQ7jY=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/tools v0.40.0 h1:yLkxfA+Qnul4cs9QA3KnlFu0lVmd8JJfoq+E41uSutA=
golang.org/x/tools v0.40.0/go.mod h1:Ik/tzLRlbscWpqqMRjyWYDisX8bG13FrdXp3o4Sr9lc=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.247.0 h1:tSd/e0QrUlLsrwMKmkbQhYVa109qIintOls2Wh6bngc=
@@ -381,8 +435,8 @@ google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c h1:
google.golang.org/genproto/googleapis/rpc v0.0.0-20250818200422-3122310a409c/go.mod h1:gw1tLEfykwDz2ET4a12jcXt4couGAm7IwsVaTy0Sflo=
google.golang.org/grpc v1.74.2 h1:WoosgB65DlWVC9FqI82dGsZhWFNBSLjQ84bjROOpMu4=
google.golang.org/grpc v1.74.2/go.mod h1:CtQ+BGjaAIXHs/5YS3i473GqwBBa1zGQNevxdeBEXrM=
google.golang.org/protobuf v1.36.7 h1:IgrO7UwFQGJdRNXH/sQux4R1Dj1WAKcLElzeeRaXV2A=
google.golang.org/protobuf v1.36.7/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
google.golang.org/protobuf v1.36.11 h1:fV6ZwhNocDyBLK0dj+fg8ektcVegBBuEolpbTQyBNVE=
google.golang.org/protobuf v1.36.11/go.mod h1:HTf+CrKn2C3g5S8VImy6tdcUvCska2kB7j23XfzDpco=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -394,4 +448,3 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
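The new `github.com/swaggo/files`, `github.com/swaggo/gin-swagger`, and `github.com/swaggo/swag` entries above are the usual stack for serving a Swagger UI from a Gin server. As a hedged illustration only (the route path and docs import below are hypothetical, not taken from this repository), the typical wiring looks like:

```go
package main

import (
	"github.com/gin-gonic/gin"
	swaggerFiles "github.com/swaggo/files"
	ginSwagger "github.com/swaggo/gin-swagger"
	// _ "your/module/docs" // hypothetical: the package generated by swag init
)

func main() {
	r := gin.Default()
	// Serve the interactive Swagger UI; the OpenAPI spec comes from the generated docs package.
	r.GET("/swagger/*any", ginSwagger.WrapHandler(swaggerFiles.Handler))
	_ = r.Run(":8080")
}
```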

View File

@@ -8,6 +8,7 @@ import (
"os"
"path/filepath"
"reflect"
"slices"
"strconv"
"strings"
@@ -115,7 +116,7 @@ func Init() (ret *Flags, err error) {
// Create mapping from flag names (both short and long) to yaml tag names
flagToYamlTag := make(map[string]string)
t := reflect.TypeOf(Flags{})
t := reflect.TypeFor[Flags]()
for i := 0; i < t.NumField(); i++ {
field := t.Field(i)
yamlTag := field.Tag.Get("yaml")
@@ -224,14 +225,14 @@ func Init() (ret *Flags, err error) {
}
func parseDebugLevel(args []string) int {
for i := 0; i < len(args); i++ {
for i := range args {
arg := args[i]
if arg == "--debug" && i+1 < len(args) {
if lvl, err := strconv.Atoi(args[i+1]); err == nil {
return lvl
}
} else if strings.HasPrefix(arg, "--debug=") {
if lvl, err := strconv.Atoi(strings.TrimPrefix(arg, "--debug=")); err == nil {
} else if after, ok := strings.CutPrefix(arg, "--debug="); ok {
if lvl, err := strconv.Atoi(after); err == nil {
return lvl
}
}
@@ -241,8 +242,8 @@ func parseDebugLevel(args []string) int {
func extractFlag(arg string) string {
var flag string
if strings.HasPrefix(arg, "--") {
flag = strings.TrimPrefix(arg, "--")
if after, ok := strings.CutPrefix(arg, "--"); ok {
flag = after
if i := strings.Index(flag, "="); i > 0 {
flag = flag[:i]
}
@@ -348,10 +349,8 @@ func validateImageFile(imagePath string) error {
ext := strings.ToLower(filepath.Ext(imagePath))
validExtensions := []string{".png", ".jpeg", ".jpg", ".webp"}
for _, validExt := range validExtensions {
if ext == validExt {
return nil // Valid extension found
}
if slices.Contains(validExtensions, ext) {
return nil // Valid extension found
}
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_file_extension"), ext))
@@ -370,13 +369,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
// Validate size
if size != "" {
validSizes := []string{"1024x1024", "1536x1024", "1024x1536", "auto"}
valid := false
for _, validSize := range validSizes {
if size == validSize {
valid = true
break
}
}
valid := slices.Contains(validSizes, size)
if !valid {
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_size"), size))
}
@@ -385,13 +378,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
// Validate quality
if quality != "" {
validQualities := []string{"low", "medium", "high", "auto"}
valid := false
for _, validQuality := range validQualities {
if quality == validQuality {
valid = true
break
}
}
valid := slices.Contains(validQualities, quality)
if !valid {
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_quality"), quality))
}
@@ -400,13 +387,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
// Validate background
if background != "" {
validBackgrounds := []string{"opaque", "transparent"}
valid := false
for _, validBackground := range validBackgrounds {
if background == validBackground {
valid = true
break
}
}
valid := slices.Contains(validBackgrounds, background)
if !valid {
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_background"), background))
}
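For reference, each of the `valid := false` loops replaced above collapses into the standard library's `slices.Contains` (Go 1.21+). A minimal, self-contained sketch of the idiom, with a simplified error message rather than the project's i18n lookup:

```go
package main

import (
	"fmt"
	"slices"
)

// validateSize mirrors the size check above: a membership test via slices.Contains.
func validateSize(size string) error {
	validSizes := []string{"1024x1024", "1536x1024", "1024x1536", "auto"}
	if size != "" && !slices.Contains(validSizes, size) {
		return fmt.Errorf("invalid image size %q", size)
	}
	return nil
}

func main() {
	fmt.Println(validateSize("1024x1024")) // <nil>
	fmt.Println(validateSize("640x480"))   // invalid image size "640x480"
}
```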

View File

@@ -137,8 +137,7 @@ func (h *TranslatedHelpWriter) getTranslatedDescription(flagName string) string
// getOriginalDescription retrieves the original description from struct tags
func (h *TranslatedHelpWriter) getOriginalDescription(flagName string) string {
flags := &Flags{}
flagsType := reflect.TypeOf(flags).Elem()
flagsType := reflect.TypeFor[Flags]()
for i := 0; i < flagsType.NumField(); i++ {
field := flagsType.Field(i)
@@ -184,10 +183,10 @@ func detectLanguageFromArgs() string {
if i+1 < len(args) {
return args[i+1]
}
} else if strings.HasPrefix(arg, "--language=") {
return strings.TrimPrefix(arg, "--language=")
} else if strings.HasPrefix(arg, "-g=") {
return strings.TrimPrefix(arg, "-g=")
} else if after, ok := strings.CutPrefix(arg, "--language="); ok {
return after
} else if after, ok := strings.CutPrefix(arg, "-g="); ok {
return after
} else if runtime.GOOS == "windows" && strings.HasPrefix(arg, "/g:") {
return strings.TrimPrefix(arg, "/g:")
} else if runtime.GOOS == "windows" && strings.HasPrefix(arg, "/g=") {
@@ -218,8 +217,7 @@ func detectLanguageFromEnv() string {
// writeAllFlags writes all flags with translated descriptions
func (h *TranslatedHelpWriter) writeAllFlags() {
// Use direct reflection on the Flags struct to get all flag definitions
flags := &Flags{}
flagsType := reflect.TypeOf(flags).Elem()
flagsType := reflect.TypeFor[Flags]()
for i := 0; i < flagsType.NumField(); i++ {
field := flagsType.Field(i)
@@ -274,10 +272,7 @@ func (h *TranslatedHelpWriter) writeAllFlags() {
// Pad to align descriptions
flagStr := flagLine.String()
padding := 34 - len(flagStr)
if padding < 2 {
padding = 2
}
padding := max(34-len(flagStr), 2)
fmt.Fprintf(h.writer, "%s%s%s", flagStr, strings.Repeat(" ", padding), description)
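Two smaller idioms appear in this hunk: `strings.CutPrefix` (Go 1.20+) replaces the paired `HasPrefix`/`TrimPrefix` calls and reports whether the prefix matched, and the built-in `max` (Go 1.21+) replaces the padding `if`. A short sketch under those assumptions (the flag name is illustrative):

```go
package main

import (
	"fmt"
	"strings"
)

// parseLanguageFlag mirrors the --language=<code> handling above: CutPrefix
// both strips the prefix and reports whether it was present.
func parseLanguageFlag(arg string) (string, bool) {
	if after, ok := strings.CutPrefix(arg, "--language="); ok {
		return after, true
	}
	return "", false
}

func main() {
	fmt.Println(parseLanguageFlag("--language=de")) // de true

	// padding := max(34-len(flagStr), 2), as in the help writer: the built-in
	// max keeps the description column aligned without an if statement.
	flagStr := "--some-flag"
	padding := max(34-len(flagStr), 2)
	fmt.Printf("%s%s<description>\n", flagStr, strings.Repeat(" ", padding))
}
```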

View File

@@ -30,6 +30,28 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
}
if currentFlags.ListPatterns {
// Check if patterns exist before listing
var names []string
if names, err = fabricDb.Patterns.GetNames(); err != nil {
return true, err
}
if len(names) == 0 && !currentFlags.ShellCompleteOutput {
// No patterns found - provide helpful guidance
fmt.Println("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(i18n.T("patterns_not_found_header"))
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Printf("\n%s\n", i18n.T("patterns_required_to_work"))
fmt.Println()
fmt.Println(i18n.T("patterns_option_run_setup"))
fmt.Printf(" %s\n", i18n.T("patterns_option_run_setup_command"))
fmt.Println()
fmt.Println(i18n.T("patterns_option_run_update"))
fmt.Printf(" %s\n", i18n.T("patterns_option_run_update_command"))
fmt.Println()
return true, nil
}
err = fabricDb.Patterns.ListNames(currentFlags.ShellCompleteOutput)
return true, err
}
@@ -39,6 +61,11 @@ func handleListingCommands(currentFlags *Flags, fabricDb *fsdb.Db, registry *cor
if models, err = registry.VendorManager.GetModels(); err != nil {
return true, err
}
if currentFlags.Vendor != "" {
models = models.FilterByVendor(currentFlags.Vendor)
}
if currentFlags.ShellCompleteOutput {
models.Print(true)
} else {

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/atotto/clipboard"
@@ -66,10 +67,5 @@ func CreateAudioOutputFile(audioData []byte, fileName string) (err error) {
func IsAudioFormat(fileName string) bool {
ext := strings.ToLower(filepath.Ext(fileName))
audioExts := []string{".wav", ".mp3", ".m4a", ".aac", ".ogg", ".flac"}
for _, audioExt := range audioExts {
if ext == audioExt {
return true
}
}
return false
return slices.Contains(audioExts, ext)
}

View File

@@ -17,8 +17,9 @@ func handleTranscription(flags *Flags, registry *core.PluginRegistry) (message s
if vendorName == "" {
vendorName = "OpenAI"
}
vendor, ok := registry.VendorManager.VendorsByName[vendorName]
if !ok {
vendor := registry.VendorManager.FindByName(vendorName)
if vendor == nil {
return "", fmt.Errorf("%s", fmt.Sprintf(i18n.T("vendor_not_configured"), vendorName))
}
tr, ok := vendor.(transcriber)

View File

@@ -32,11 +32,9 @@ type Chatter struct {
// Send processes a chat request and applies file changes for create_coding_feature pattern
func (o *Chatter) Send(request *domain.ChatRequest, opts *domain.ChatOptions) (session *fsdb.Session, err error) {
modelToUse := opts.Model
if modelToUse == "" {
modelToUse = o.model
}
if o.vendor.NeedsRawMode(modelToUse) {
// Use o.model (normalized) for NeedsRawMode check instead of opts.Model
// This ensures case-insensitive model names work correctly (e.g., "GPT-5" → "gpt-5")
if o.vendor.NeedsRawMode(o.model) {
opts.Raw = true
}
if session, err = o.BuildSession(request, opts.Raw); err != nil {
@@ -57,6 +55,10 @@ func (o *Chatter) Send(request *domain.ChatRequest, opts *domain.ChatOptions) (s
if opts.Model == "" {
opts.Model = o.model
} else {
// Ensure opts.Model uses the normalized name from o.model if they refer to the same model
// This handles cases where user provides "GPT-5" but we've normalized it to "gpt-5"
opts.Model = o.model
}
if opts.ModelContextLength == 0 {

View File

@@ -176,29 +176,178 @@ func (o *PluginRegistry) SaveEnvFile() (err error) {
}
func (o *PluginRegistry) Setup() (err error) {
setupQuestion := plugins.NewSetupQuestion("Enter the number of the plugin to setup")
groupsPlugins := util.NewGroupsItemsSelector("Available plugins (please configure all required plugins):",
// Check if this is a first-time setup
isFirstRun := o.isFirstTimeSetup()
if isFirstRun {
err = o.runFirstTimeSetup()
} else {
err = o.runInteractiveSetup()
}
if err != nil {
return
}
// Validate setup after completion
o.validateSetup()
return
}
// isFirstTimeSetup checks if this is a first-time setup
func (o *PluginRegistry) isFirstTimeSetup() bool {
// Check if patterns and strategies are not configured
patternsConfigured := o.PatternsLoader.IsConfigured()
strategiesConfigured := o.Strategies.IsConfigured()
hasVendor := len(o.VendorManager.Vendors) > 0
return !patternsConfigured || !strategiesConfigured || !hasVendor
}
// runFirstTimeSetup handles first-time setup with automatic pattern/strategy download
func (o *PluginRegistry) runFirstTimeSetup() (err error) {
fmt.Println("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(i18n.T("setup_welcome_header"))
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
// Step 1: Download patterns (required, automatic)
if !o.PatternsLoader.IsConfigured() {
fmt.Printf("\n%s\n", i18n.T("setup_step_downloading_patterns"))
if err = o.PatternsLoader.Setup(); err != nil {
return fmt.Errorf(i18n.T("setup_failed_download_patterns"), err)
}
if err = o.SaveEnvFile(); err != nil {
return
}
}
// Step 2: Download strategies (required, automatic)
if !o.Strategies.IsConfigured() {
fmt.Printf("\n%s\n", i18n.T("setup_step_downloading_strategies"))
if err = o.Strategies.Setup(); err != nil {
return fmt.Errorf(i18n.T("setup_failed_download_strategies"), err)
}
if err = o.SaveEnvFile(); err != nil {
return
}
}
// Step 3: Configure AI vendor (interactive)
if len(o.VendorManager.Vendors) == 0 {
fmt.Printf("\n%s\n", i18n.T("setup_step_configure_ai_provider"))
fmt.Printf(" %s\n", i18n.T("setup_ai_provider_required"))
fmt.Printf(" %s\n", i18n.T("setup_add_more_providers_later"))
fmt.Println()
if err = o.runVendorSetup(); err != nil {
return
}
}
// Step 4: Set default vendor and model
if !o.Defaults.IsConfigured() {
fmt.Printf("\n%s\n", i18n.T("setup_step_setting_defaults"))
if err = o.Defaults.Setup(); err != nil {
return fmt.Errorf(i18n.T("setup_failed_set_defaults"), err)
}
if err = o.SaveEnvFile(); err != nil {
return
}
}
fmt.Println("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(i18n.T("setup_complete_header"))
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Printf("\n%s\n", i18n.T("setup_next_steps"))
fmt.Printf(" %s\n", i18n.T("setup_list_patterns"))
fmt.Printf(" %s\n", i18n.T("setup_try_pattern"))
fmt.Printf(" %s\n", i18n.T("setup_configure_more"))
fmt.Println()
return
}
// runVendorSetup helps user select and configure their first AI vendor
func (o *PluginRegistry) runVendorSetup() (err error) {
setupQuestion := plugins.NewSetupQuestion("Enter the number of the AI provider to configure")
groupsPlugins := util.NewGroupsItemsSelector(i18n.T("setup_available_ai_providers"),
func(plugin plugins.Plugin) string {
var configuredLabel string
if plugin.IsConfigured() {
configuredLabel = " (configured)"
} else {
configuredLabel = ""
}
return fmt.Sprintf("%v%v", plugin.GetSetupDescription(), configuredLabel)
return plugin.GetSetupDescription()
})
groupsPlugins.AddGroupItems("AI Vendors [at least one, required]", lo.Map(o.VendorsAll.Vendors,
groupsPlugins.AddGroupItems("", lo.Map(o.VendorsAll.Vendors,
func(vendor ai.Vendor, _ int) plugins.Plugin {
return vendor
})...)
groupsPlugins.AddGroupItems("Tools", o.CustomPatterns, o.Defaults, o.Jina, o.Language, o.PatternsLoader, o.Strategies, o.YouTube)
groupsPlugins.Print(false)
if answerErr := setupQuestion.Ask(i18n.T("setup_enter_ai_provider_number")); answerErr != nil {
return answerErr
}
if setupQuestion.Value == "" {
return fmt.Errorf("%s", i18n.T("setup_no_ai_provider_selected"))
}
number, parseErr := strconv.Atoi(setupQuestion.Value)
if parseErr != nil {
return fmt.Errorf(i18n.T("setup_invalid_selection"), setupQuestion.Value)
}
var plugin plugins.Plugin
if _, plugin, err = groupsPlugins.GetGroupAndItemByItemNumber(number); err != nil {
return
}
if pluginSetupErr := plugin.Setup(); pluginSetupErr != nil {
return pluginSetupErr
}
if err = o.SaveEnvFile(); err != nil {
return
}
if o.VendorManager.FindByName(plugin.GetName()) == nil {
if vendor, ok := plugin.(ai.Vendor); ok {
o.VendorManager.AddVendors(vendor)
}
}
return
}
// runInteractiveSetup runs the standard interactive setup menu
func (o *PluginRegistry) runInteractiveSetup() (err error) {
setupQuestion := plugins.NewSetupQuestion("Enter the number of the plugin to setup")
groupsPlugins := util.NewGroupsItemsSelector(i18n.T("setup_available_plugins"),
func(plugin plugins.Plugin) string {
var configuredLabel string
if plugin.IsConfigured() {
configuredLabel = i18n.T("plugin_configured")
} else {
configuredLabel = i18n.T("plugin_not_configured")
}
return fmt.Sprintf("%v%v", plugin.GetSetupDescription(), configuredLabel)
})
// Add vendors first under REQUIRED section
groupsPlugins.AddGroupItems(i18n.T("setup_required_configuration_header"), lo.Map(o.VendorsAll.Vendors,
func(vendor ai.Vendor, _ int) plugins.Plugin {
return vendor
})...)
// Add required tools
groupsPlugins.AddGroupItems(i18n.T("setup_required_tools"), o.Defaults, o.PatternsLoader, o.Strategies)
// Add optional tools
groupsPlugins.AddGroupItems(i18n.T("setup_optional_configuration_header"), o.CustomPatterns, o.Jina, o.Language, o.YouTube)
for {
groupsPlugins.Print(false)
if answerErr := setupQuestion.Ask("Plugin Number"); answerErr != nil {
if answerErr := setupQuestion.Ask(i18n.T("setup_plugin_number")); answerErr != nil {
break
}
@@ -222,9 +371,8 @@ func (o *PluginRegistry) Setup() (err error) {
}
}
if _, ok := o.VendorManager.VendorsByName[plugin.GetName()]; !ok {
var vendor ai.Vendor
if vendor, ok = plugin.(ai.Vendor); ok {
if o.VendorManager.FindByName(plugin.GetName()) == nil {
if vendor, ok := plugin.(ai.Vendor); ok {
o.VendorManager.AddVendors(vendor)
}
}
@@ -238,6 +386,58 @@ func (o *PluginRegistry) Setup() (err error) {
return
}
// validateSetup checks if required components are configured and warns user
func (o *PluginRegistry) validateSetup() {
fmt.Println("\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
fmt.Println(i18n.T("setup_validation_header"))
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
missingRequired := false
// Check AI vendor
if len(o.VendorManager.Vendors) > 0 {
fmt.Printf(" %s\n", i18n.T("setup_validation_ai_provider_configured"))
} else {
fmt.Printf(" %s\n", i18n.T("setup_validation_ai_provider_missing"))
missingRequired = true
}
// Check default model
if o.Defaults.IsConfigured() {
fmt.Printf(" %s\n", fmt.Sprintf(i18n.T("setup_validation_defaults_configured"), o.Defaults.Vendor.Value, o.Defaults.Model.Value))
} else {
fmt.Printf(" %s\n", i18n.T("setup_validation_defaults_missing"))
missingRequired = true
}
// Check patterns
if o.PatternsLoader.IsConfigured() {
fmt.Printf(" %s\n", i18n.T("setup_validation_patterns_configured"))
} else {
fmt.Printf(" %s\n", i18n.T("setup_validation_patterns_missing"))
missingRequired = true
}
// Check strategies
if o.Strategies.IsConfigured() {
fmt.Printf(" %s\n", i18n.T("setup_validation_strategies_configured"))
} else {
fmt.Printf(" %s\n", i18n.T("setup_validation_strategies_missing"))
missingRequired = true
}
fmt.Println("━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━")
if missingRequired {
fmt.Printf("\n%s\n", i18n.T("setup_validation_incomplete_warning"))
fmt.Printf(" %s\n", i18n.T("setup_validation_incomplete_help"))
fmt.Println()
} else {
fmt.Printf("\n%s\n", i18n.T("setup_validation_complete"))
fmt.Println()
}
}
func (o *PluginRegistry) SetupVendor(vendorName string) (err error) {
if err = o.VendorsAll.SetupVendor(vendorName, o.VendorManager.VendorsByName); err != nil {
return
@@ -330,11 +530,22 @@ func (o *PluginRegistry) GetChatter(model string, modelContextLength int, vendor
if models, err = vendorManager.GetModels(); err != nil {
return
}
// Normalize model name to match actual available model (case-insensitive)
// This must be done BEFORE checking vendor availability
actualModelName := models.FindModelNameCaseInsensitive(model)
if actualModelName != "" {
model = actualModelName // Use normalized name for all subsequent checks
}
if vendorName != "" {
// ensure vendor exists and provides model
ret.vendor = vendorManager.FindByName(vendorName)
availableVendors := models.FindGroupsByItem(model)
if ret.vendor == nil || !lo.Contains(availableVendors, vendorName) {
vendorAvailable := lo.ContainsBy(availableVendors, func(name string) bool {
return strings.EqualFold(name, vendorName)
})
if ret.vendor == nil || !vendorAvailable {
err = fmt.Errorf("model %s not available for vendor %s", model, vendorName)
return
}
@@ -345,6 +556,7 @@ func (o *PluginRegistry) GetChatter(model string, modelContextLength int, vendor
}
ret.vendor = vendorManager.FindByName(models.FindGroupsByItemFirst(model))
}
ret.model = model
}
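The normalization logic above relies on case-insensitive matching: the requested model is first mapped to its canonical catalog spelling, then vendor availability is checked with `strings.EqualFold` via `lo.ContainsBy`. A hedged sketch of that idea follows; `findModelNameCaseInsensitive` is a hypothetical stand-in for the `FindModelNameCaseInsensitive` method referenced in the diff, and the model and vendor names are only examples:

```go
package main

import (
	"fmt"
	"strings"

	"github.com/samber/lo"
)

// findModelNameCaseInsensitive returns the canonical spelling of model from
// the available list, or "" if no case-insensitive match exists.
func findModelNameCaseInsensitive(available []string, model string) string {
	for _, m := range available {
		if strings.EqualFold(m, model) {
			return m
		}
	}
	return ""
}

func main() {
	models := []string{"gpt-5", "claude-sonnet-4"}
	if actual := findModelNameCaseInsensitive(models, "GPT-5"); actual != "" {
		fmt.Println("normalized to:", actual) // normalized to: gpt-5
	}

	vendors := []string{"OpenAI", "Anthropic"}
	available := lo.ContainsBy(vendors, func(name string) bool {
		return strings.EqualFold(name, "openai")
	})
	fmt.Println("vendor available:", available) // vendor available: true
}
```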

View File

@@ -5,6 +5,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
)
@@ -146,14 +147,7 @@ func fixInvalidEscapes(jsonStr string) string {
// Check for escape sequences only inside strings
if inQuotes && ch == '\\' && i+1 < len(jsonStr) {
nextChar := jsonStr[i+1]
isValid := false
for _, validEscape := range validEscapes {
if nextChar == validEscape {
isValid = true
break
}
}
isValid := slices.Contains(validEscapes, nextChar)
if !isValid {
// Invalid escape sequence - add an extra backslash

View File

@@ -1,165 +1,211 @@
{
"html_readability_error": "verwende ursprüngliche Eingabe, da HTML-Lesbarkeit nicht angewendet werden kann",
"vendor_not_configured": "Anbieter %s ist nicht konfiguriert",
"vendor_no_transcription_support": "Anbieter %s unterstützt keine Audio-Transkription",
"transcription_model_required": "Transkriptionsmodell ist erforderlich (verwende --transcribe-model)",
"youtube_not_configured": "YouTube ist nicht konfiguriert, bitte führe das Setup-Verfahren aus",
"youtube_api_key_required": "YouTube API-Schlüssel für Kommentare und Metadaten erforderlich. Führe 'fabric --setup' aus, um zu konfigurieren",
"youtube_ytdlp_not_found": "yt-dlp wurde nicht in PATH gefunden. Bitte installiere yt-dlp, um die YouTube-Transkript-Funktionalität zu nutzen",
"youtube_invalid_url": "ungültige YouTube-URL, kann keine Video- oder Playlist-ID abrufen: '%s'",
"youtube_url_is_playlist_not_video": "URL ist eine Playlist, kein Video",
"youtube_no_video_id_found": "keine Video-ID in URL gefunden",
"youtube_rate_limit_exceeded": "YouTube-Ratenlimit überschritten. Versuche es später erneut oder verwende andere yt-dlp-Argumente wie '--sleep-requests 1', um Anfragen zu verlangsamen.",
"youtube_auth_required_bot_detection": "YouTube erfordert Authentifizierung (Bot-Erkennung). Verwende --yt-dlp-args='--cookies-from-browser BROWSER' wobei BROWSER chrome, firefox, brave usw. sein kann.",
"youtube_ytdlp_stderr_error": "Fehler beim Lesen von yt-dlp stderr",
"youtube_invalid_ytdlp_arguments": "ungültige yt-dlp-Argumente: %v",
"youtube_failed_create_temp_dir": "temporäres Verzeichnis konnte nicht erstellt werden: %v",
"youtube_no_transcript_content": "kein Transkriptinhalt in VTT-Datei gefunden",
"youtube_no_vtt_files_found": "keine VTT-Dateien im Verzeichnis gefunden",
"youtube_failed_walk_directory": "Verzeichnis konnte nicht durchlaufen werden: %v",
"youtube_error_getting_video_details": "Fehler beim Abrufen der Videodetails: %v",
"youtube_invalid_duration_string": "ungültige Dauer-Zeichenfolge: %s",
"youtube_error_getting_metadata": "Fehler beim Abrufen der Video-Metadaten: %v",
"youtube_error_parsing_duration": "Fehler beim Parsen der Videodauer: %v",
"youtube_error_getting_comments": "Fehler beim Abrufen der Kommentare: %v",
"youtube_error_saving_csv": "Fehler beim Speichern der Videos in CSV: %v",
"youtube_no_video_found_with_id": "kein Video mit ID gefunden: %s",
"youtube_invalid_timestamp_format": "ungültiges Zeitstempel-Format: %s",
"youtube_empty_seconds_string": "leere Sekunden-Zeichenfolge",
"youtube_invalid_seconds_format": "ungültiges Sekundenformat %q: %w",
"error_fetching_playlist_videos": "Fehler beim Abrufen der Playlist-Videos: %w",
"openai_api_base_url_not_configured": "API-Basis-URL für Anbieter %s nicht konfiguriert",
"openai_failed_to_create_models_url": "Modell-URL konnte nicht erstellt werden: %w",
"openai_unexpected_status_code_with_body": "unerwarteter Statuscode: %d von Anbieter %s, Antwort: %s",
"openai_unexpected_status_code_read_error_partial": "unerwarteter Statuscode: %d von Anbieter %s (Fehler beim Lesen: %v), teilweise Antwort: %s",
"openai_unexpected_status_code_read_error": "unerwarteter Statuscode: %d von Anbieter %s (Fehler beim Lesen der Antwort: %v)",
"openai_unable_to_parse_models_response": "Modell-Antwort konnte nicht geparst werden; rohe Antwort: %s",
"scraping_not_configured": "Scraping-Funktionalität ist nicht konfiguriert. Bitte richte Jina ein, um Scraping zu aktivieren",
"could_not_determine_home_dir": "konnte Benutzer-Home-Verzeichnis nicht bestimmen: %w",
"could_not_stat_env_file": "konnte .env-Datei nicht überprüfen: %w",
"could_not_create_config_dir": "konnte Konfigurationsverzeichnis nicht erstellen: %w",
"could_not_create_env_file": "konnte .env-Datei nicht erstellen: %w",
"could_not_copy_to_clipboard": "konnte nicht in die Zwischenablage kopieren: %v",
"file_already_exists_not_overwriting": "Datei %s existiert bereits, wird nicht überschrieben. Benenne die vorhandene Datei um oder wähle einen anderen Namen",
"error_creating_file": "Fehler beim Erstellen der Datei: %v",
"error_writing_to_file": "Fehler beim Schreiben in die Datei: %v",
"error_creating_audio_file": "Fehler beim Erstellen der Audio-Datei: %v",
"error_writing_audio_data": "Fehler beim Schreiben von Audio-Daten in die Datei: %v",
"tts_model_requires_audio_output": "TTS-Modell '%s' benötigt Audio-Ausgabe. Bitte gib eine Audio-Ausgabedatei mit dem -o Flag an (z.B., -o output.wav)",
"audio_output_file_specified_but_not_tts_model": "Audio-Ausgabedatei '%s' angegeben, aber Modell '%s' ist kein TTS-Modell. Bitte verwende ein TTS-Modell wie gemini-2.5-flash-preview-tts",
"file_already_exists_choose_different": "Datei %s existiert bereits. Bitte wähle einen anderen Dateinamen oder entferne die vorhandene Datei",
"no_notification_system_available": "kein Benachrichtigungssystem verfügbar",
"cannot_convert_string": "kann String %q nicht zu %v konvertieren",
"unsupported_conversion": "nicht unterstützte Konvertierung von %v zu %v",
"invalid_config_path": "ungültiger Konfigurationspfad: %w",
"config_file_not_found": "Konfigurationsdatei nicht gefunden: %s",
"error_reading_config_file": "Fehler beim Lesen der Konfigurationsdatei: %w",
"error_parsing_config_file": "Fehler beim Parsen der Konfigurationsdatei: %w",
"error_reading_piped_message": "Fehler beim Lesen der weitergeleiteten Nachricht von stdin: %w",
"image_file_already_exists": "Bilddatei existiert bereits: %s",
"invalid_image_file_extension": "ungültige Bilddatei-Erweiterung '%s'. Unterstützte Formate: .png, .jpeg, .jpg, .webp",
"image_parameters_require_image_file": "Bildparameter (--image-size, --image-quality, --image-background, --image-compression) können nur mit --image-file verwendet werden",
"invalid_image_size": "ungültige Bildgröße '%s'. Unterstützte Größen: 1024x1024, 1536x1024, 1024x1536, auto",
"invalid_image_quality": "ungültige Bildqualität '%s'. Unterstützte Qualitäten: low, medium, high, auto",
"invalid_image_background": "ungültiger Bildhintergrund '%s'. Unterstützte Hintergründe: opaque, transparent",
"image_compression_jpeg_webp_only": "Bildkomprimierung kann nur mit JPEG- und WebP-Formaten verwendet werden, nicht %s",
"image_compression_range_error": "Bildkomprimierung muss zwischen 0 und 100 liegen, erhalten: %d",
"transparent_background_png_webp_only": "transparenter Hintergrund kann nur mit PNG- und WebP-Formaten verwendet werden, nicht %s",
"available_transcription_models": "Verfügbare Transkriptionsmodelle:",
"tts_audio_generated_successfully": "TTS-Audio erfolgreich generiert und gespeichert unter: %s\n",
"fabric_command_complete": "Fabric-Befehl abgeschlossen",
"fabric_command_complete_with_pattern": "Fabric: %s abgeschlossen",
"command_completed_successfully": "Befehl erfolgreich abgeschlossen",
"output_truncated": "Ausgabe: %s...",
"output_full": "Ausgabe: %s",
"choose_pattern_from_available": "Wähle ein Muster aus den verfügbaren Mustern",
"pattern_variables_help": "Werte für Mustervariablen, z.B. -v=#role:expert -v=#points:30",
"choose_context_from_available": "Wähle einen Kontext aus den verfügbaren Kontexten",
"choose_session_from_available": "Wähle eine Sitzung aus den verfügbaren Sitzungen",
"attachment_path_or_url_help": "Anhangspfad oder URL (z.B. für OpenAI-Bilderkennungsnachrichten)",
"run_setup_for_reconfigurable_parts": "Setup für alle rekonfigurierbaren Teile von Fabric ausführen",
"set_temperature": "Temperatur festlegen",
"set_top_p": "Top P festlegen",
"stream_help": "Streaming",
"set_presence_penalty": "Präsenzstrafe festlegen",
"use_model_defaults_raw_help": "Verwende die Standardwerte des Modells, ohne Chat-Optionen (temperature, top_p usw.) zu senden. Gilt nur für OpenAI-kompatible Anbieter. Anthropic-Modelle verwenden stets eine intelligente Parameterauswahl, um modell-spezifische Anforderungen einzuhalten.",
"set_frequency_penalty": "Häufigkeitsstrafe festlegen",
"list_all_patterns": "Alle Muster auflisten",
"list_all_available_models": "Alle verfügbaren Modelle auflisten",
"list_all_contexts": "Alle Kontexte auflisten",
"list_all_sessions": "Alle Sitzungen auflisten",
"update_patterns": "Muster aktualisieren",
"messages_to_send_to_chat": "Nachrichten zum Senden an den Chat",
"copy_to_clipboard": "In Zwischenablage kopieren",
"choose_model": "Modell wählen",
"specify_vendor_for_model": "Anbieter für das ausgewählte Modell angeben (z.B., -V \"LM Studio\" -m openai/gpt-oss-20b)",
"model_context_length_ollama": "Modell-Kontextlänge (betrifft nur ollama)",
"output_to_file": "Ausgabe in Datei",
"output_entire_session": "Gesamte Sitzung (auch eine temporäre) in die Ausgabedatei ausgeben",
"number_of_latest_patterns": "Anzahl der neuesten Muster zum Auflisten",
"change_default_model": "Standardmodell ändern",
"youtube_url_help": "YouTube-Video oder Playlist-\"URL\" zum Abrufen von Transkript und Kommentaren und Senden an Chat oder Ausgabe in Konsole und Speichern in Ausgabedatei",
"prefer_playlist_over_video": "Playlist gegenüber Video bevorzugen, wenn beide IDs in der URL vorhanden sind",
"grab_transcript_from_youtube": "Transkript von YouTube-Video abrufen und an Chat senden (wird standardmäßig verwendet).",
"grab_transcript_with_timestamps": "Transkript von YouTube-Video mit Zeitstempeln abrufen und an Chat senden",
"grab_comments_from_youtube": "Kommentare von YouTube-Video abrufen und an Chat senden",
"output_video_metadata": "Video-Metadaten ausgeben",
"additional_yt_dlp_args": "Zusätzliche Argumente für yt-dlp (z.B. '--cookies-from-browser brave')",
"specify_language_code": "Sprachencode für den Chat angeben, z.B. -g=en -g=zh -g=pt-BR -g=pt-PT",
"scrape_website_url": "Website-URL zu Markdown mit Jina AI scrapen",
"search_question_jina": "Suchanfrage mit Jina AI",
"seed_for_lmm_generation": "Seed für LMM-Generierung",
"wipe_context": "Kontext löschen",
"wipe_session": "Sitzung löschen",
"print_context": "Kontext ausgeben",
"print_session": "Sitzung ausgeben",
"convert_html_readability": "HTML-Eingabe in eine saubere, lesbare Ansicht konvertieren",
"apply_variables_to_input": "Variablen auf Benutzereingabe anwenden",
"disable_pattern_variable_replacement": "Mustervariablenersetzung deaktivieren",
"show_dry_run": "Zeige, was an das Modell gesendet würde, ohne es tatsächlich zu senden",
"serve_fabric_rest_api": "Fabric REST API bereitstellen",
"serve_fabric_api_ollama_endpoints": "Fabric REST API mit ollama-Endpunkten bereitstellen",
"address_to_bind_rest_api": "Adresse zum Binden der REST API",
"api_key_secure_server_routes": "API-Schlüssel zum Sichern der Server-Routen",
"path_to_yaml_config": "Pfad zur YAML-Konfigurationsdatei",
"print_current_version": "Aktuelle Version ausgeben",
"list_all_registered_extensions": "Alle registrierten Erweiterungen auflisten",
"register_new_extension": "Neue Erweiterung aus Konfigurationsdateipfad registrieren",
"remove_registered_extension": "Registrierte Erweiterung nach Name entfernen",
"choose_strategy_from_available": "Strategie aus den verfügbaren Strategien wählen",
"list_all_strategies": "Alle Strategien auflisten",
"list_all_vendors": "Alle Anbieter auflisten",
"output_raw_list_shell_completion": "Rohe Liste ohne Kopfzeilen/Formatierung ausgeben (für Shell-Vervollständigung)",
"enable_web_search_tool": "Web-Such-Tool für unterstützte Modelle aktivieren (Anthropic, OpenAI, Gemini)",
"set_location_web_search": "Standort für Web-Suchergebnisse festlegen (z.B., 'America/Los_Angeles')",
"save_generated_image_to_file": "Generiertes Bild in angegebenem Dateipfad speichern (z.B., 'output.png')",
"image_dimensions_help": "Bildabmessungen: 1024x1024, 1536x1024, 1024x1536, auto (Standard: auto)",
"image_quality_help": "Bildqualität: low, medium, high, auto (Standard: auto)",
"compression_level_jpeg_webp": "Komprimierungslevel 0-100 für JPEG/WebP-Formate (Standard: nicht gesetzt)",
"background_type_help": "Hintergrundtyp: opaque, transparent (Standard: opaque, nur für PNG/WebP)",
"suppress_thinking_tags": "In Denk-Tags eingeschlossenen Text unterdrücken",
"start_tag_thinking_sections": "Start-Tag für Denk-Abschnitte",
"end_tag_thinking_sections": "End-Tag für Denk-Abschnitte",
"disable_openai_responses_api": "OpenAI Responses API deaktivieren (Standard: false)",
"audio_video_file_transcribe": "Audio- oder Video-Datei zum Transkribieren",
"model_for_transcription": "Modell für Transkription (getrennt vom Chat-Modell)",
"split_media_files_ffmpeg": "Audio/Video-Dateien größer als 25MB mit ffmpeg aufteilen",
"tts_voice_name": "TTS-Stimmenname für unterstützte Modelle (z.B., Kore, Charon, Puck)",
"list_gemini_tts_voices": "Alle verfügbaren Gemini TTS-Stimmen auflisten",
"list_transcription_models": "Alle verfügbaren Transkriptionsmodelle auflisten",
"send_desktop_notification": "Desktop-Benachrichtigung senden, wenn Befehl abgeschlossen ist",
"custom_notification_command": "Benutzerdefinierter Befehl für Benachrichtigungen (überschreibt eingebaute Benachrichtigungen)",
"set_reasoning_thinking_level": "Reasoning/Thinking-Level festlegen (z.B., off, low, medium, high, oder numerische Token für Anthropic oder Google Gemini)",
"set_debug_level": "Debug-Level festlegen (0=aus, 1=grundlegend, 2=detailliert, 3=Trace)",
"usage_header": "Verwendung:",
"application_options_header": "Anwendungsoptionen:",
"help_options_header": "Hilfe-Optionen:",
"help_message": "Diese Hilfenachricht anzeigen",
"options_placeholder": "[OPTIONEN]",
"available_vendors_header": "Verfügbare Anbieter:",
"available_models_header": "Verfügbare Modelle",
"no_items_found": "Keine %s",
"no_description_available": "Keine Beschreibung verfügbar",
"i18n_download_failed": "Fehler beim Herunterladen der Übersetzung für Sprache '%s': %v",
"i18n_load_failed": "Fehler beim Laden der Übersetzungsdatei: %v"
"html_readability_error": "verwende ursprüngliche Eingabe, da HTML-Lesbarkeit nicht angewendet werden kann",
"vendor_not_configured": "Anbieter %s ist nicht konfiguriert",
"vendor_no_transcription_support": "Anbieter %s unterstützt keine Audio-Transkription",
"transcription_model_required": "Transkriptionsmodell ist erforderlich (verwende --transcribe-model)",
"youtube_not_configured": "YouTube ist nicht konfiguriert, bitte führe das Setup-Verfahren aus",
"youtube_api_key_required": "YouTube API-Schlüssel für Kommentare und Metadaten erforderlich. Führe 'fabric --setup' aus, um zu konfigurieren",
"youtube_ytdlp_not_found": "yt-dlp wurde nicht in PATH gefunden. Bitte installiere yt-dlp, um die YouTube-Transkript-Funktionalität zu nutzen",
"youtube_invalid_url": "ungültige YouTube-URL, kann keine Video- oder Playlist-ID abrufen: '%s'",
"youtube_url_is_playlist_not_video": "URL ist eine Playlist, kein Video",
"youtube_no_video_id_found": "keine Video-ID in URL gefunden",
"youtube_rate_limit_exceeded": "YouTube-Ratenlimit überschritten. Versuche es später erneut oder verwende andere yt-dlp-Argumente wie '--sleep-requests 1', um Anfragen zu verlangsamen.",
"youtube_auth_required_bot_detection": "YouTube erfordert Authentifizierung (Bot-Erkennung). Verwende --yt-dlp-args='--cookies-from-browser BROWSER' wobei BROWSER chrome, firefox, brave usw. sein kann.",
"youtube_ytdlp_stderr_error": "Fehler beim Lesen von yt-dlp stderr",
"youtube_invalid_ytdlp_arguments": "ungültige yt-dlp-Argumente: %v",
"youtube_failed_create_temp_dir": "temporäres Verzeichnis konnte nicht erstellt werden: %v",
"youtube_no_transcript_content": "kein Transkriptinhalt in VTT-Datei gefunden",
"youtube_no_vtt_files_found": "keine VTT-Dateien im Verzeichnis gefunden",
"youtube_failed_walk_directory": "Verzeichnis konnte nicht durchlaufen werden: %v",
"youtube_error_getting_video_details": "Fehler beim Abrufen der Videodetails: %v",
"youtube_invalid_duration_string": "ungültige Dauer-Zeichenfolge: %s",
"youtube_error_getting_metadata": "Fehler beim Abrufen der Video-Metadaten: %v",
"youtube_error_parsing_duration": "Fehler beim Parsen der Videodauer: %v",
"youtube_error_getting_comments": "Fehler beim Abrufen der Kommentare: %v",
"youtube_error_saving_csv": "Fehler beim Speichern der Videos in CSV: %v",
"youtube_no_video_found_with_id": "kein Video mit ID gefunden: %s",
"youtube_invalid_timestamp_format": "ungültiges Zeitstempel-Format: %s",
"youtube_empty_seconds_string": "leere Sekunden-Zeichenfolge",
"youtube_invalid_seconds_format": "ungültiges Sekundenformat %q: %w",
"error_fetching_playlist_videos": "Fehler beim Abrufen der Playlist-Videos: %w",
"openai_api_base_url_not_configured": "API-Basis-URL für Anbieter %s nicht konfiguriert",
"openai_failed_to_create_models_url": "Modell-URL konnte nicht erstellt werden: %w",
"openai_unexpected_status_code_with_body": "unerwarteter Statuscode: %d von Anbieter %s, Antwort: %s",
"openai_unexpected_status_code_read_error_partial": "unerwarteter Statuscode: %d von Anbieter %s (Fehler beim Lesen: %v), teilweise Antwort: %s",
"openai_unexpected_status_code_read_error": "unerwarteter Statuscode: %d von Anbieter %s (Fehler beim Lesen der Antwort: %v)",
"openai_unable_to_parse_models_response": "Modell-Antwort konnte nicht geparst werden; rohe Antwort: %s",
"scraping_not_configured": "Scraping-Funktionalität ist nicht konfiguriert. Bitte richte Jina ein, um Scraping zu aktivieren",
"could_not_determine_home_dir": "konnte Benutzer-Home-Verzeichnis nicht bestimmen: %w",
"could_not_stat_env_file": "konnte .env-Datei nicht überprüfen: %w",
"could_not_create_config_dir": "konnte Konfigurationsverzeichnis nicht erstellen: %w",
"could_not_create_env_file": "konnte .env-Datei nicht erstellen: %w",
"could_not_copy_to_clipboard": "konnte nicht in die Zwischenablage kopieren: %v",
"file_already_exists_not_overwriting": "Datei %s existiert bereits, wird nicht überschrieben. Benenne die vorhandene Datei um oder wähle einen anderen Namen",
"error_creating_file": "Fehler beim Erstellen der Datei: %v",
"error_writing_to_file": "Fehler beim Schreiben in die Datei: %v",
"error_creating_audio_file": "Fehler beim Erstellen der Audio-Datei: %v",
"error_writing_audio_data": "Fehler beim Schreiben von Audio-Daten in die Datei: %v",
"tts_model_requires_audio_output": "TTS-Modell '%s' benötigt Audio-Ausgabe. Bitte gib eine Audio-Ausgabedatei mit dem -o Flag an (z.B., -o output.wav)",
"audio_output_file_specified_but_not_tts_model": "Audio-Ausgabedatei '%s' angegeben, aber Modell '%s' ist kein TTS-Modell. Bitte verwende ein TTS-Modell wie gemini-2.5-flash-preview-tts",
"file_already_exists_choose_different": "Datei %s existiert bereits. Bitte wähle einen anderen Dateinamen oder entferne die vorhandene Datei",
"no_notification_system_available": "kein Benachrichtigungssystem verfügbar",
"cannot_convert_string": "kann String %q nicht zu %v konvertieren",
"unsupported_conversion": "nicht unterstützte Konvertierung von %v zu %v",
"invalid_config_path": "ungültiger Konfigurationspfad: %w",
"config_file_not_found": "Konfigurationsdatei nicht gefunden: %s",
"error_reading_config_file": "Fehler beim Lesen der Konfigurationsdatei: %w",
"error_parsing_config_file": "Fehler beim Parsen der Konfigurationsdatei: %w",
"error_reading_piped_message": "Fehler beim Lesen der weitergeleiteten Nachricht von stdin: %w",
"image_file_already_exists": "Bilddatei existiert bereits: %s",
"invalid_image_file_extension": "ungültige Bilddatei-Erweiterung '%s'. Unterstützte Formate: .png, .jpeg, .jpg, .webp",
"image_parameters_require_image_file": "Bildparameter (--image-size, --image-quality, --image-background, --image-compression) können nur mit --image-file verwendet werden",
"invalid_image_size": "ungültige Bildgröße '%s'. Unterstützte Größen: 1024x1024, 1536x1024, 1024x1536, auto",
"invalid_image_quality": "ungültige Bildqualität '%s'. Unterstützte Qualitäten: low, medium, high, auto",
"invalid_image_background": "ungültiger Bildhintergrund '%s'. Unterstützte Hintergründe: opaque, transparent",
"image_compression_jpeg_webp_only": "Bildkomprimierung kann nur mit JPEG- und WebP-Formaten verwendet werden, nicht %s",
"image_compression_range_error": "Bildkomprimierung muss zwischen 0 und 100 liegen, erhalten: %d",
"transparent_background_png_webp_only": "transparenter Hintergrund kann nur mit PNG- und WebP-Formaten verwendet werden, nicht %s",
"available_transcription_models": "Verfügbare Transkriptionsmodelle:",
"tts_audio_generated_successfully": "TTS-Audio erfolgreich generiert und gespeichert unter: %s\n",
"fabric_command_complete": "Fabric-Befehl abgeschlossen",
"fabric_command_complete_with_pattern": "Fabric: %s abgeschlossen",
"command_completed_successfully": "Befehl erfolgreich abgeschlossen",
"output_truncated": "Ausgabe: %s...",
"output_full": "Ausgabe: %s",
"choose_pattern_from_available": "Wähle ein Muster aus den verfügbaren Mustern",
"pattern_variables_help": "Werte für Mustervariablen, z.B. -v=#role:expert -v=#points:30",
"choose_context_from_available": "Wähle einen Kontext aus den verfügbaren Kontexten",
"choose_session_from_available": "Wähle eine Sitzung aus den verfügbaren Sitzungen",
"attachment_path_or_url_help": "Anhangspfad oder URL (z.B. für OpenAI-Bilderkennungsnachrichten)",
"run_setup_for_reconfigurable_parts": "Setup für alle rekonfigurierbaren Teile von Fabric ausführen",
"set_temperature": "Temperatur festlegen",
"set_top_p": "Top P festlegen",
"stream_help": "Streaming",
"set_presence_penalty": "Präsenzstrafe festlegen",
"use_model_defaults_raw_help": "Verwende die Standardwerte des Modells, ohne Chat-Optionen (temperature, top_p usw.) zu senden. Gilt nur für OpenAI-kompatible Anbieter. Anthropic-Modelle verwenden stets eine intelligente Parameterauswahl, um modell-spezifische Anforderungen einzuhalten.",
"set_frequency_penalty": "Häufigkeitsstrafe festlegen",
"list_all_patterns": "Alle Muster auflisten",
"list_all_available_models": "Alle verfügbaren Modelle auflisten",
"list_all_contexts": "Alle Kontexte auflisten",
"list_all_sessions": "Alle Sitzungen auflisten",
"update_patterns": "Muster aktualisieren",
"messages_to_send_to_chat": "Nachrichten zum Senden an den Chat",
"copy_to_clipboard": "In Zwischenablage kopieren",
"choose_model": "Modell wählen",
"specify_vendor_for_model": "Anbieter für das ausgewählte Modell angeben (z.B., -V \"LM Studio\" -m openai/gpt-oss-20b)",
"model_context_length_ollama": "Modell-Kontextlänge (betrifft nur ollama)",
"output_to_file": "Ausgabe in Datei",
"output_entire_session": "Gesamte Sitzung (auch eine temporäre) in die Ausgabedatei ausgeben",
"number_of_latest_patterns": "Anzahl der neuesten Muster zum Auflisten",
"change_default_model": "Standardmodell ändern",
"youtube_url_help": "YouTube-Video oder Playlist-\"URL\" zum Abrufen von Transkript und Kommentaren und Senden an Chat oder Ausgabe in Konsole und Speichern in Ausgabedatei",
"prefer_playlist_over_video": "Playlist gegenüber Video bevorzugen, wenn beide IDs in der URL vorhanden sind",
"grab_transcript_from_youtube": "Transkript von YouTube-Video abrufen und an Chat senden (wird standardmäßig verwendet).",
"grab_transcript_with_timestamps": "Transkript von YouTube-Video mit Zeitstempeln abrufen und an Chat senden",
"grab_comments_from_youtube": "Kommentare von YouTube-Video abrufen und an Chat senden",
"output_video_metadata": "Video-Metadaten ausgeben",
"additional_yt_dlp_args": "Zusätzliche Argumente für yt-dlp (z.B. '--cookies-from-browser brave')",
"specify_language_code": "Sprachencode für den Chat angeben, z.B. -g=en -g=zh -g=pt-BR -g=pt-PT",
"scrape_website_url": "Website-URL zu Markdown mit Jina AI scrapen",
"search_question_jina": "Suchanfrage mit Jina AI",
"seed_for_lmm_generation": "Seed für LMM-Generierung",
"wipe_context": "Kontext löschen",
"wipe_session": "Sitzung löschen",
"print_context": "Kontext ausgeben",
"print_session": "Sitzung ausgeben",
"convert_html_readability": "HTML-Eingabe in eine saubere, lesbare Ansicht konvertieren",
"apply_variables_to_input": "Variablen auf Benutzereingabe anwenden",
"disable_pattern_variable_replacement": "Mustervariablenersetzung deaktivieren",
"show_dry_run": "Zeige, was an das Modell gesendet würde, ohne es tatsächlich zu senden",
"serve_fabric_rest_api": "Fabric REST API bereitstellen",
"serve_fabric_api_ollama_endpoints": "Fabric REST API mit ollama-Endpunkten bereitstellen",
"address_to_bind_rest_api": "Adresse zum Binden der REST API",
"api_key_secure_server_routes": "API-Schlüssel zum Sichern der Server-Routen",
"path_to_yaml_config": "Pfad zur YAML-Konfigurationsdatei",
"print_current_version": "Aktuelle Version ausgeben",
"list_all_registered_extensions": "Alle registrierten Erweiterungen auflisten",
"register_new_extension": "Neue Erweiterung aus Konfigurationsdateipfad registrieren",
"remove_registered_extension": "Registrierte Erweiterung nach Name entfernen",
"choose_strategy_from_available": "Strategie aus den verfügbaren Strategien wählen",
"list_all_strategies": "Alle Strategien auflisten",
"list_all_vendors": "Alle Anbieter auflisten",
"output_raw_list_shell_completion": "Rohe Liste ohne Kopfzeilen/Formatierung ausgeben (für Shell-Vervollständigung)",
"enable_web_search_tool": "Web-Such-Tool für unterstützte Modelle aktivieren (Anthropic, OpenAI, Gemini)",
"set_location_web_search": "Standort für Web-Suchergebnisse festlegen (z.B., 'America/Los_Angeles')",
"save_generated_image_to_file": "Generiertes Bild in angegebenem Dateipfad speichern (z.B., 'output.png')",
"image_dimensions_help": "Bildabmessungen: 1024x1024, 1536x1024, 1024x1536, auto (Standard: auto)",
"image_quality_help": "Bildqualität: low, medium, high, auto (Standard: auto)",
"compression_level_jpeg_webp": "Komprimierungslevel 0-100 für JPEG/WebP-Formate (Standard: nicht gesetzt)",
"background_type_help": "Hintergrundtyp: opaque, transparent (Standard: opaque, nur für PNG/WebP)",
"suppress_thinking_tags": "In Denk-Tags eingeschlossenen Text unterdrücken",
"start_tag_thinking_sections": "Start-Tag für Denk-Abschnitte",
"end_tag_thinking_sections": "End-Tag für Denk-Abschnitte",
"disable_openai_responses_api": "OpenAI Responses API deaktivieren (Standard: false)",
"audio_video_file_transcribe": "Audio- oder Video-Datei zum Transkribieren",
"model_for_transcription": "Modell für Transkription (getrennt vom Chat-Modell)",
"split_media_files_ffmpeg": "Audio/Video-Dateien größer als 25MB mit ffmpeg aufteilen",
"tts_voice_name": "TTS-Stimmenname für unterstützte Modelle (z.B., Kore, Charon, Puck)",
"list_gemini_tts_voices": "Alle verfügbaren Gemini TTS-Stimmen auflisten",
"list_transcription_models": "Alle verfügbaren Transkriptionsmodelle auflisten",
"send_desktop_notification": "Desktop-Benachrichtigung senden, wenn Befehl abgeschlossen ist",
"custom_notification_command": "Benutzerdefinierter Befehl für Benachrichtigungen (überschreibt eingebaute Benachrichtigungen)",
"set_reasoning_thinking_level": "Reasoning/Thinking-Level festlegen (z.B., off, low, medium, high, oder numerische Token für Anthropic oder Google Gemini)",
"set_debug_level": "Debug-Level festlegen (0=aus, 1=grundlegend, 2=detailliert, 3=Trace)",
"usage_header": "Verwendung:",
"application_options_header": "Anwendungsoptionen:",
"help_options_header": "Hilfe-Optionen:",
"help_message": "Diese Hilfenachricht anzeigen",
"options_placeholder": "[OPTIONEN]",
"available_vendors_header": "Verfügbare Anbieter:",
"available_models_header": "Verfügbare Modelle",
"no_items_found": "Keine %s",
"no_description_available": "Keine Beschreibung verfügbar",
"i18n_download_failed": "Fehler beim Herunterladen der Übersetzung für Sprache '%s': %v",
"i18n_load_failed": "Fehler beim Laden der Übersetzungsdatei: %v",
"setup_welcome_header": "🎉 Willkommen bei Fabric! Lass uns mit der Einrichtung beginnen.",
"setup_step_downloading_patterns": "📥 Schritt 1: Patterns werden heruntergeladen (erforderlich für Fabric)...",
"setup_step_downloading_strategies": "📥 Schritt 2: Strategien werden heruntergeladen (erforderlich für Fabric)...",
"setup_step_configure_ai_provider": "🤖 Schritt 3: KI-Anbieter konfigurieren",
"setup_ai_provider_required": "Fabric benötigt mindestens einen KI-Anbieter.",
"setup_add_more_providers_later": "Sie können später weitere Anbieter mit 'fabric --setup' hinzufügen",
"setup_step_setting_defaults": "⚙️ Schritt 4: Standard-Anbieter und -Modell werden festgelegt...",
"setup_complete_header": "✅ Einrichtung abgeschlossen! Sie können Fabric jetzt verwenden.",
"setup_next_steps": "Nächste Schritte:",
"setup_list_patterns": "• Verfügbare Patterns auflisten: fabric -l",
"setup_try_pattern": "• Ein Pattern ausprobieren: echo 'Ihr Text' | fabric --pattern summarize",
"setup_configure_more": "• Weitere Einstellungen konfigurieren: fabric --setup",
"setup_failed_download_patterns": "Fehler beim Herunterladen der Patterns: %w",
"setup_failed_download_strategies": "Fehler beim Herunterladen der Strategien: %w",
"setup_failed_set_defaults": "Fehler beim Festlegen des Standard-Anbieters und -Modells: %w",
"setup_no_ai_provider_selected": "Kein KI-Anbieter ausgewählt - mindestens einer ist erforderlich",
"setup_invalid_selection": "Ungültige Auswahl: %s",
"setup_available_ai_providers": "Verfügbare KI-Anbieter:",
"setup_enter_ai_provider_number": "KI-Anbieter-Nummer",
"setup_available_plugins": "Verfügbare Plugins:",
"setup_plugin_number": "Plugin-Nummer",
"setup_required_configuration_header": "━━━ ERFORDERLICHE KONFIGURATION ━━━\n\nKI-Anbieter [mindestens einer erforderlich]",
"setup_required_tools": "Erforderliche Werkzeuge",
"setup_optional_configuration_header": "━━━ OPTIONALE KONFIGURATION ━━━\n\nOptionale Werkzeuge",
"setup_validation_header": "Konfigurationsstatus:",
"setup_validation_ai_provider_configured": "✓ KI-Anbieter konfiguriert",
"setup_validation_ai_provider_missing": "✗ KI-Anbieter nicht konfiguriert - Erforderlich für Fabric",
"setup_validation_defaults_configured": "✓ Standard-Anbieter/-Modell festgelegt: %s/%s",
"setup_validation_defaults_missing": "✗ Standard-Anbieter/-Modell nicht festgelegt - Erforderlich für Fabric",
"setup_validation_patterns_configured": "✓ Patterns heruntergeladen",
"setup_validation_patterns_missing": "✗ Patterns nicht gefunden - Erforderlich für Fabric",
"setup_validation_strategies_configured": "✓ Strategien heruntergeladen",
"setup_validation_strategies_missing": "✗ Strategien nicht gefunden - Erforderlich für Fabric",
"setup_validation_incomplete_warning": "⚠️ Einrichtung unvollständig! Erforderliche Komponenten fehlen.",
"setup_validation_incomplete_help": "Führen Sie 'fabric --setup' erneut aus, um fehlende Elemente zu konfigurieren,\noder 'fabric -U', um Patterns und Strategien herunterzuladen.",
"setup_validation_complete": "✓ Alle erforderlichen Komponenten konfiguriert!",
"patterns_not_found_header": "⚠️ Keine Patterns gefunden!",
"patterns_required_to_work": "Patterns sind erforderlich, damit Fabric funktioniert. Um dies zu beheben:",
"patterns_option_run_setup": "Option 1 (Empfohlen): Setup ausführen, um Patterns herunterzuladen",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Option 2: Patterns direkt herunterladen/aktualisieren",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "Pattern '%s' nicht gefunden.\n\nKeine Patterns installiert! Um dies zu beheben:\n • Führen Sie 'fabric --setup' aus, um Patterns zu konfigurieren und herunterzuladen\n • Oder führen Sie 'fabric -U' aus, um Patterns direkt herunterzuladen/zu aktualisieren",
"pattern_not_found_list_available": "Pattern '%s' nicht gefunden. Führen Sie 'fabric -l' aus, um verfügbare Patterns anzuzeigen",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NICHT KONFIGURIERT"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "No %s",
"no_description_available": "No description available",
"i18n_download_failed": "Failed to download translation for language '%s': %v",
"i18n_load_failed": "Failed to load translation file: %v"
"i18n_load_failed": "Failed to load translation file: %v",
"setup_welcome_header": "🎉 Welcome to Fabric! Let's get you set up.",
"setup_step_downloading_patterns": "📥 Step 1: Downloading patterns (required for Fabric to work)...",
"setup_step_downloading_strategies": "📥 Step 2: Downloading strategies (required for Fabric to work)...",
"setup_step_configure_ai_provider": "🤖 Step 3: Configure an AI provider",
"setup_ai_provider_required": "Fabric needs at least one AI provider to work.",
"setup_add_more_providers_later": "You'll be able to add more providers later with 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Step 4: Setting default vendor and model...",
"setup_complete_header": "✅ Setup complete! You can now use Fabric.",
"setup_next_steps": "Next steps:",
"setup_list_patterns": "• List available patterns: fabric -l",
"setup_try_pattern": "• Try a pattern: echo 'your text' | fabric --pattern summarize",
"setup_configure_more": "• Configure more settings: fabric --setup",
"setup_failed_download_patterns": "failed to download patterns: %w",
"setup_failed_download_strategies": "failed to download strategies: %w",
"setup_failed_set_defaults": "failed to set default vendor and model: %w",
"setup_no_ai_provider_selected": "no AI provider selected - at least one is required",
"setup_invalid_selection": "invalid selection: %s",
"setup_available_ai_providers": "Available AI Providers:",
"setup_enter_ai_provider_number": "AI Provider Number",
"setup_available_plugins": "Available plugins:",
"setup_plugin_number": "Plugin Number",
"setup_required_configuration_header": "━━━ REQUIRED CONFIGURATION ━━━\n\nAI Vendors [at least one required]",
"setup_required_tools": "Required Tools",
"setup_optional_configuration_header": "━━━ OPTIONAL CONFIGURATION ━━━\n\nOptional Tools",
"setup_validation_header": "Configuration Status:",
"setup_validation_ai_provider_configured": "✓ AI Provider configured",
"setup_validation_ai_provider_missing": "✗ AI Provider not configured - Required for Fabric to work",
"setup_validation_defaults_configured": "✓ Default vendor/model set: %s/%s",
"setup_validation_defaults_missing": "✗ Default vendor/model not set - Required for Fabric to work",
"setup_validation_patterns_configured": "✓ Patterns downloaded",
"setup_validation_patterns_missing": "✗ Patterns not found - Required for Fabric to work",
"setup_validation_strategies_configured": "✓ Strategies downloaded",
"setup_validation_strategies_missing": "✗ Strategies not found - Required for Fabric to work",
"setup_validation_incomplete_warning": "⚠️ Setup incomplete! Missing required components.",
"setup_validation_incomplete_help": "Run 'fabric --setup' again to configure missing items,\nor run 'fabric -U' to download patterns and strategies.",
"setup_validation_complete": "✓ All required components configured!",
"patterns_not_found_header": "⚠️ No patterns found!",
"patterns_required_to_work": "Patterns are required for Fabric to work. To fix this:",
"patterns_option_run_setup": "Option 1 (Recommended): Run setup to download patterns",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Option 2: Download/update patterns directly",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "pattern '%s' not found.\n\nNo patterns are installed! To fix this:\n • Run 'fabric --setup' to configure and download patterns\n • Or run 'fabric -U' to download/update patterns directly",
"pattern_not_found_list_available": "pattern '%s' not found. Run 'fabric -l' to see available patterns",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NOT CONFIGURED"
}
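
The new setup_* keys in the English file above carry Go format verbs: the %s/%v keys are plain display strings, while the lowercase %w keys are error-wrapping templates meant for fmt.Errorf. A minimal sketch of how such a flat key→string file could be consumed — the path and the loader itself are illustrative only; Fabric's actual i18n plumbing may differ:

```go
package main

import (
	"encoding/json"
	"fmt"
	"os"
)

func main() {
	// Hypothetical path; the real location of the locale files may differ.
	data, err := os.ReadFile("locales/en.json")
	if err != nil {
		panic(err)
	}

	// The file shown above is a flat JSON object of key -> string.
	msgs := map[string]string{}
	if err := json.Unmarshal(data, &msgs); err != nil {
		panic(err)
	}

	// Display strings use %s/%v and go straight to the user.
	fmt.Printf(msgs["setup_invalid_selection"]+"\n", "42")

	// Error strings use %w so callers can wrap an underlying error.
	wrapped := fmt.Errorf(msgs["setup_failed_download_patterns"], os.ErrNotExist)
	fmt.Println(wrapped)
}
```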

View File

@@ -161,5 +161,51 @@
"no_items_found": "No hay %s",
"no_description_available": "No hay descripción disponible",
"i18n_download_failed": "Error al descargar traducción para el idioma '%s': %v",
"i18n_load_failed": "Error al cargar archivo de traducción: %v"
"i18n_load_failed": "Error al cargar archivo de traducción: %v",
"setup_welcome_header": "🎉 ¡Bienvenido a Fabric! Vamos a configurarte.",
"setup_step_downloading_patterns": "📥 Paso 1: Descargando patrones (requeridos para que Fabric funcione)...",
"setup_step_downloading_strategies": "📥 Paso 2: Descargando estrategias (requeridas para que Fabric funcione)...",
"setup_step_configure_ai_provider": "🤖 Paso 3: Configurar un proveedor de IA",
"setup_ai_provider_required": "Fabric necesita al menos un proveedor de IA para funcionar.",
"setup_add_more_providers_later": "Podrás agregar más proveedores después con 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Paso 4: Estableciendo proveedor y modelo predeterminados...",
"setup_complete_header": "✅ ¡Configuración completa! Ya puedes usar Fabric.",
"setup_next_steps": "Próximos pasos:",
"setup_list_patterns": "• Listar patrones disponibles: fabric -l",
"setup_try_pattern": "• Probar un patrón: echo 'tu texto' | fabric --pattern summarize",
"setup_configure_more": "• Configurar más opciones: fabric --setup",
"setup_failed_download_patterns": "error al descargar patrones: %w",
"setup_failed_download_strategies": "error al descargar estrategias: %w",
"setup_failed_set_defaults": "error al establecer proveedor y modelo predeterminados: %w",
"setup_no_ai_provider_selected": "no se seleccionó proveedor de IA - se requiere al menos uno",
"setup_invalid_selection": "selección inválida: %s",
"setup_available_ai_providers": "Proveedores de IA Disponibles:",
"setup_enter_ai_provider_number": "Número de Proveedor de IA",
"setup_available_plugins": "Plugins disponibles:",
"setup_plugin_number": "Número de Plugin",
"setup_required_configuration_header": "━━━ CONFIGURACIÓN REQUERIDA ━━━\n\nProveedores de IA [se requiere al menos uno]",
"setup_required_tools": "Herramientas Requeridas",
"setup_optional_configuration_header": "━━━ CONFIGURACIÓN OPCIONAL ━━━\n\nHerramientas Opcionales",
"setup_validation_header": "Estado de Configuración:",
"setup_validation_ai_provider_configured": "✓ Proveedor de IA configurado",
"setup_validation_ai_provider_missing": "✗ Proveedor de IA no configurado - Requerido para que Fabric funcione",
"setup_validation_defaults_configured": "✓ Proveedor/modelo predeterminado establecido: %s/%s",
"setup_validation_defaults_missing": "✗ Proveedor/modelo predeterminado no establecido - Requerido para que Fabric funcione",
"setup_validation_patterns_configured": "✓ Patrones descargados",
"setup_validation_patterns_missing": "✗ Patrones no encontrados - Requeridos para que Fabric funcione",
"setup_validation_strategies_configured": "✓ Estrategias descargadas",
"setup_validation_strategies_missing": "✗ Estrategias no encontradas - Requeridas para que Fabric funcione",
"setup_validation_incomplete_warning": "⚠️ ¡Configuración incompleta! Faltan componentes requeridos.",
"setup_validation_incomplete_help": "Ejecuta 'fabric --setup' de nuevo para configurar los elementos faltantes,\no ejecuta 'fabric -U' para descargar patrones y estrategias.",
"setup_validation_complete": "✓ ¡Todos los componentes requeridos configurados!",
"patterns_not_found_header": "⚠️ ¡No se encontraron patrones!",
"patterns_required_to_work": "Los patrones son requeridos para que Fabric funcione. Para solucionar esto:",
"patterns_option_run_setup": "Opción 1 (Recomendada): Ejecutar configuración para descargar patrones",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Opción 2: Descargar/actualizar patrones directamente",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "patrón '%s' no encontrado.\n\n¡No hay patrones instalados! Para solucionar esto:\n • Ejecuta 'fabric --setup' para configurar y descargar patrones\n • O ejecuta 'fabric -U' para descargar/actualizar patrones directamente",
"pattern_not_found_list_available": "patrón '%s' no encontrado. Ejecuta 'fabric -l' para ver los patrones disponibles",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NO CONFIGURADO"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "هیچ %s",
"no_description_available": "توضیحی در دسترس نیست",
"i18n_download_failed": "دانلود ترجمه برای زبان '%s' ناموفق بود: %v",
"i18n_load_failed": "بارگذاری فایل ترجمه ناموفق بود: %v"
"i18n_load_failed": "بارگذاری فایل ترجمه ناموفق بود: %v",
"setup_welcome_header": "🎉 به Fabric خوش آمدید! بیایید تنظیمات را انجام دهیم.",
"setup_step_downloading_patterns": "📥 مرحله ۱: دانلود الگوها (برای کار Fabric ضروری است)...",
"setup_step_downloading_strategies": "📥 مرحله ۲: دانلود استراتژی‌ها (برای کار Fabric ضروری است)...",
"setup_step_configure_ai_provider": "🤖 مرحله ۳: پیکربندی یک ارائه‌دهنده هوش مصنوعی",
"setup_ai_provider_required": "Fabric برای کار کردن به حداقل یک ارائه‌دهنده هوش مصنوعی نیاز دارد.",
"setup_add_more_providers_later": "می‌توانید بعداً با 'fabric --setup' ارائه‌دهندگان بیشتری اضافه کنید",
"setup_step_setting_defaults": "⚙️ مرحله ۴: تنظیم ارائه‌دهنده و مدل پیش‌فرض...",
"setup_complete_header": "✅ تنظیمات کامل شد! اکنون می‌توانید از Fabric استفاده کنید.",
"setup_next_steps": "مراحل بعدی:",
"setup_list_patterns": "• نمایش الگوهای موجود: fabric -l",
"setup_try_pattern": "• امتحان یک الگو: echo 'متن شما' | fabric --pattern summarize",
"setup_configure_more": "• پیکربندی تنظیمات بیشتر: fabric --setup",
"setup_failed_download_patterns": "دانلود الگوها ناموفق بود: %w",
"setup_failed_download_strategies": "دانلود استراتژی‌ها ناموفق بود: %w",
"setup_failed_set_defaults": "تنظیم ارائه‌دهنده و مدل پیش‌فرض ناموفق بود: %w",
"setup_no_ai_provider_selected": "هیچ ارائه‌دهنده هوش مصنوعی انتخاب نشده - حداقل یکی ضروری است",
"setup_invalid_selection": "انتخاب نامعتبر: %s",
"setup_available_ai_providers": "ارائه‌دهندگان هوش مصنوعی موجود:",
"setup_enter_ai_provider_number": "شماره ارائه‌دهنده هوش مصنوعی",
"setup_available_plugins": "افزونه‌های موجود:",
"setup_plugin_number": "شماره افزونه",
"setup_required_configuration_header": "━━━ پیکربندی ضروری ━━━\n\nارائهدهندگان هوش مصنوعی [حداقل یکی ضروری است]",
"setup_required_tools": "ابزارهای ضروری",
"setup_optional_configuration_header": "━━━ پیکربندی اختیاری ━━━\n\nابزارهای اختیاری",
"setup_validation_header": "وضعیت پیکربندی:",
"setup_validation_ai_provider_configured": "✓ ارائه‌دهنده هوش مصنوعی پیکربندی شده",
"setup_validation_ai_provider_missing": "✗ ارائه‌دهنده هوش مصنوعی پیکربندی نشده - برای کار Fabric ضروری است",
"setup_validation_defaults_configured": "✓ ارائه‌دهنده/مدل پیش‌فرض تنظیم شده: %s/%s",
"setup_validation_defaults_missing": "✗ ارائه‌دهنده/مدل پیش‌فرض تنظیم نشده - برای کار Fabric ضروری است",
"setup_validation_patterns_configured": "✓ الگوها دانلود شده",
"setup_validation_patterns_missing": "✗ الگوها یافت نشد - برای کار Fabric ضروری است",
"setup_validation_strategies_configured": "✓ استراتژی‌ها دانلود شده",
"setup_validation_strategies_missing": "✗ استراتژی‌ها یافت نشد - برای کار Fabric ضروری است",
"setup_validation_incomplete_warning": "⚠️ تنظیمات ناقص! اجزای ضروری وجود ندارند.",
"setup_validation_incomplete_help": "دوباره 'fabric --setup' را اجرا کنید تا موارد ناقص را پیکربندی کنید،\nیا 'fabric -U' را برای دانلود الگوها و استراتژی‌ها اجرا کنید.",
"setup_validation_complete": "✓ تمام اجزای ضروری پیکربندی شده‌اند!",
"patterns_not_found_header": "⚠️ هیچ الگویی یافت نشد!",
"patterns_required_to_work": "الگوها برای کار Fabric ضروری هستند. برای رفع این مشکل:",
"patterns_option_run_setup": "گزینه ۱ (توصیه شده): اجرای تنظیمات برای دانلود الگوها",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "گزینه ۲: دانلود/به‌روزرسانی مستقیم الگوها",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "الگوی '%s' یافت نشد.\n\nهیچ الگویی نصب نشده است! برای رفع این مشکل:\n • 'fabric --setup' را برای پیکربندی و دانلود الگوها اجرا کنید\n • یا 'fabric -U' را برای دانلود/به‌روزرسانی الگوها اجرا کنید",
"pattern_not_found_list_available": "الگوی '%s' یافت نشد. برای مشاهده الگوهای موجود 'fabric -l' را اجرا کنید",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ پیکربندی نشده"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "Aucun %s",
"no_description_available": "Aucune description disponible",
"i18n_download_failed": "Échec du téléchargement de la traduction pour la langue '%s' : %v",
"i18n_load_failed": "Échec du chargement du fichier de traduction : %v"
"i18n_load_failed": "Échec du chargement du fichier de traduction : %v",
"setup_welcome_header": "🎉 Bienvenue sur Fabric ! Configurons votre installation.",
"setup_step_downloading_patterns": "📥 Étape 1 : Téléchargement des modèles (requis pour le fonctionnement de Fabric)...",
"setup_step_downloading_strategies": "📥 Étape 2 : Téléchargement des stratégies (requis pour le fonctionnement de Fabric)...",
"setup_step_configure_ai_provider": "🤖 Étape 3 : Configurer un fournisseur d'IA",
"setup_ai_provider_required": "Fabric a besoin d'au moins un fournisseur d'IA pour fonctionner.",
"setup_add_more_providers_later": "Vous pourrez ajouter d'autres fournisseurs plus tard avec 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Étape 4 : Configuration du fournisseur et du modèle par défaut...",
"setup_complete_header": "✅ Configuration terminée ! Vous pouvez maintenant utiliser Fabric.",
"setup_next_steps": "Prochaines étapes :",
"setup_list_patterns": "• Lister les modèles disponibles : fabric -l",
"setup_try_pattern": "• Essayer un modèle : echo 'votre texte' | fabric --pattern summarize",
"setup_configure_more": "• Configurer plus de paramètres : fabric --setup",
"setup_failed_download_patterns": "échec du téléchargement des modèles : %w",
"setup_failed_download_strategies": "échec du téléchargement des stratégies : %w",
"setup_failed_set_defaults": "échec de la configuration du fournisseur et du modèle par défaut : %w",
"setup_no_ai_provider_selected": "aucun fournisseur d'IA sélectionné - au moins un est requis",
"setup_invalid_selection": "sélection invalide : %s",
"setup_available_ai_providers": "Fournisseurs d'IA disponibles :",
"setup_enter_ai_provider_number": "Numéro du fournisseur d'IA",
"setup_available_plugins": "Plugins disponibles :",
"setup_plugin_number": "Numéro du plugin",
"setup_required_configuration_header": "━━━ CONFIGURATION REQUISE ━━━\n\nFournisseurs d'IA [au moins un requis]",
"setup_required_tools": "Outils requis",
"setup_optional_configuration_header": "━━━ CONFIGURATION OPTIONNELLE ━━━\n\nOutils optionnels",
"setup_validation_header": "État de la configuration :",
"setup_validation_ai_provider_configured": "✓ Fournisseur d'IA configuré",
"setup_validation_ai_provider_missing": "✗ Fournisseur d'IA non configuré - Requis pour le fonctionnement de Fabric",
"setup_validation_defaults_configured": "✓ Fournisseur/modèle par défaut défini : %s/%s",
"setup_validation_defaults_missing": "✗ Fournisseur/modèle par défaut non défini - Requis pour le fonctionnement de Fabric",
"setup_validation_patterns_configured": "✓ Modèles téléchargés",
"setup_validation_patterns_missing": "✗ Modèles non trouvés - Requis pour le fonctionnement de Fabric",
"setup_validation_strategies_configured": "✓ Stratégies téléchargées",
"setup_validation_strategies_missing": "✗ Stratégies non trouvées - Requises pour le fonctionnement de Fabric",
"setup_validation_incomplete_warning": "⚠️ Configuration incomplète ! Composants requis manquants.",
"setup_validation_incomplete_help": "Exécutez à nouveau 'fabric --setup' pour configurer les éléments manquants,\nou exécutez 'fabric -U' pour télécharger les modèles et stratégies.",
"setup_validation_complete": "✓ Tous les composants requis sont configurés !",
"patterns_not_found_header": "⚠️ Aucun modèle trouvé !",
"patterns_required_to_work": "Les modèles sont requis pour le fonctionnement de Fabric. Pour résoudre ce problème :",
"patterns_option_run_setup": "Option 1 (Recommandée) : Exécuter la configuration pour télécharger les modèles",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Option 2 : Télécharger/mettre à jour les modèles directement",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "modèle '%s' non trouvé.\n\nAucun modèle n'est installé ! Pour résoudre ce problème :\n • Exécutez 'fabric --setup' pour configurer et télécharger les modèles\n • Ou exécutez 'fabric -U' pour télécharger/mettre à jour les modèles directement",
"pattern_not_found_list_available": "modèle '%s' non trouvé. Exécutez 'fabric -l' pour voir les modèles disponibles",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NON CONFIGURÉ"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "Nessun %s",
"no_description_available": "Nessuna descrizione disponibile",
"i18n_download_failed": "Fallito il download della traduzione per la lingua '%s': %v",
"i18n_load_failed": "Fallito il caricamento del file di traduzione: %v"
"i18n_load_failed": "Fallito il caricamento del file di traduzione: %v",
"setup_welcome_header": "🎉 Benvenuto su Fabric! Configuriamo tutto.",
"setup_step_downloading_patterns": "📥 Passo 1: Download dei pattern (richiesti per il funzionamento di Fabric)...",
"setup_step_downloading_strategies": "📥 Passo 2: Download delle strategie (richieste per il funzionamento di Fabric)...",
"setup_step_configure_ai_provider": "🤖 Passo 3: Configura un fornitore di IA",
"setup_ai_provider_required": "Fabric necessita di almeno un fornitore di IA per funzionare.",
"setup_add_more_providers_later": "Potrai aggiungere altri fornitori in seguito con 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Passo 4: Impostazione del fornitore e del modello predefiniti...",
"setup_complete_header": "✅ Configurazione completata! Ora puoi usare Fabric.",
"setup_next_steps": "Prossimi passi:",
"setup_list_patterns": "• Elenca i pattern disponibili: fabric -l",
"setup_try_pattern": "• Prova un pattern: echo 'il tuo testo' | fabric --pattern summarize",
"setup_configure_more": "• Configura altre impostazioni: fabric --setup",
"setup_failed_download_patterns": "download dei pattern fallito: %w",
"setup_failed_download_strategies": "download delle strategie fallito: %w",
"setup_failed_set_defaults": "impostazione del fornitore e del modello predefiniti fallita: %w",
"setup_no_ai_provider_selected": "nessun fornitore di IA selezionato - almeno uno è richiesto",
"setup_invalid_selection": "selezione non valida: %s",
"setup_available_ai_providers": "Fornitori di IA disponibili:",
"setup_enter_ai_provider_number": "Numero del fornitore di IA",
"setup_available_plugins": "Plugin disponibili:",
"setup_plugin_number": "Numero del plugin",
"setup_required_configuration_header": "━━━ CONFIGURAZIONE RICHIESTA ━━━\n\nFornitori di IA [almeno uno richiesto]",
"setup_required_tools": "Strumenti richiesti",
"setup_optional_configuration_header": "━━━ CONFIGURAZIONE OPZIONALE ━━━\n\nStrumenti opzionali",
"setup_validation_header": "Stato della configurazione:",
"setup_validation_ai_provider_configured": "✓ Fornitore di IA configurato",
"setup_validation_ai_provider_missing": "✗ Fornitore di IA non configurato - Richiesto per il funzionamento di Fabric",
"setup_validation_defaults_configured": "✓ Fornitore/modello predefinito impostato: %s/%s",
"setup_validation_defaults_missing": "✗ Fornitore/modello predefinito non impostato - Richiesto per il funzionamento di Fabric",
"setup_validation_patterns_configured": "✓ Pattern scaricati",
"setup_validation_patterns_missing": "✗ Pattern non trovati - Richiesti per il funzionamento di Fabric",
"setup_validation_strategies_configured": "✓ Strategie scaricate",
"setup_validation_strategies_missing": "✗ Strategie non trovate - Richieste per il funzionamento di Fabric",
"setup_validation_incomplete_warning": "⚠️ Configurazione incompleta! Componenti richiesti mancanti.",
"setup_validation_incomplete_help": "Esegui di nuovo 'fabric --setup' per configurare gli elementi mancanti,\noppure esegui 'fabric -U' per scaricare pattern e strategie.",
"setup_validation_complete": "✓ Tutti i componenti richiesti sono configurati!",
"patterns_not_found_header": "⚠️ Nessun pattern trovato!",
"patterns_required_to_work": "I pattern sono richiesti per il funzionamento di Fabric. Per risolvere:",
"patterns_option_run_setup": "Opzione 1 (Consigliata): Esegui la configurazione per scaricare i pattern",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Opzione 2: Scarica/aggiorna i pattern direttamente",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "pattern '%s' non trovato.\n\nNessun pattern installato! Per risolvere:\n • Esegui 'fabric --setup' per configurare e scaricare i pattern\n • Oppure esegui 'fabric -U' per scaricare/aggiornare i pattern direttamente",
"pattern_not_found_list_available": "pattern '%s' non trovato. Esegui 'fabric -l' per vedere i pattern disponibili",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NON CONFIGURATO"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "%s がありません",
"no_description_available": "説明がありません",
"i18n_download_failed": "言語 '%s' の翻訳のダウンロードに失敗しました: %v",
"i18n_load_failed": "翻訳ファイルの読み込みに失敗しました: %v"
"i18n_load_failed": "翻訳ファイルの読み込みに失敗しました: %v",
"setup_welcome_header": "🎉 Fabricへようこそセットアップを始めましょう。",
"setup_step_downloading_patterns": "📥 ステップ1: パターンをダウンロード中Fabricの動作に必要です...",
"setup_step_downloading_strategies": "📥 ステップ2: ストラテジーをダウンロード中Fabricの動作に必要です...",
"setup_step_configure_ai_provider": "🤖 ステップ3: AIプロバイダーを設定",
"setup_ai_provider_required": "Fabricを動作させるには、少なくとも1つのAIプロバイダーが必要です。",
"setup_add_more_providers_later": "'fabric --setup'で後からプロバイダーを追加できます",
"setup_step_setting_defaults": "⚙️ ステップ4: デフォルトのベンダーとモデルを設定中...",
"setup_complete_header": "✅ セットアップ完了Fabricを使用できます。",
"setup_next_steps": "次のステップ:",
"setup_list_patterns": "• 利用可能なパターンを一覧表示: fabric -l",
"setup_try_pattern": "• パターンを試す: echo 'テキスト' | fabric --pattern summarize",
"setup_configure_more": "• その他の設定: fabric --setup",
"setup_failed_download_patterns": "パターンのダウンロードに失敗しました: %w",
"setup_failed_download_strategies": "ストラテジーのダウンロードに失敗しました: %w",
"setup_failed_set_defaults": "デフォルトのベンダーとモデルの設定に失敗しました: %w",
"setup_no_ai_provider_selected": "AIプロバイダーが選択されていません - 少なくとも1つは必要です",
"setup_invalid_selection": "無効な選択: %s",
"setup_available_ai_providers": "利用可能なAIプロバイダー:",
"setup_enter_ai_provider_number": "AIプロバイダー番号",
"setup_available_plugins": "利用可能なプラグイン:",
"setup_plugin_number": "プラグイン番号",
"setup_required_configuration_header": "━━━ 必須設定 ━━━\n\nAIベンダー [少なくとも1つ必要]",
"setup_required_tools": "必須ツール",
"setup_optional_configuration_header": "━━━ オプション設定 ━━━\n\nオプションツール",
"setup_validation_header": "設定状況:",
"setup_validation_ai_provider_configured": "✓ AIプロバイダー設定済み",
"setup_validation_ai_provider_missing": "✗ AIプロバイダー未設定 - Fabricの動作に必要です",
"setup_validation_defaults_configured": "✓ デフォルトのベンダー/モデル設定済み: %s/%s",
"setup_validation_defaults_missing": "✗ デフォルトのベンダー/モデル未設定 - Fabricの動作に必要です",
"setup_validation_patterns_configured": "✓ パターンダウンロード済み",
"setup_validation_patterns_missing": "✗ パターンが見つかりません - Fabricの動作に必要です",
"setup_validation_strategies_configured": "✓ ストラテジーダウンロード済み",
"setup_validation_strategies_missing": "✗ ストラテジーが見つかりません - Fabricの動作に必要です",
"setup_validation_incomplete_warning": "⚠️ セットアップ未完了!必要なコンポーネントが不足しています。",
"setup_validation_incomplete_help": "'fabric --setup'を再度実行して不足項目を設定するか、\n'fabric -U'を実行してパターンとストラテジーをダウンロードしてください。",
"setup_validation_complete": "✓ 必要なコンポーネントがすべて設定されています!",
"patterns_not_found_header": "⚠️ パターンが見つかりません!",
"patterns_required_to_work": "Fabricを動作させるにはパターンが必要です。解決するには:",
"patterns_option_run_setup": "オプション1推奨: セットアップを実行してパターンをダウンロード",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "オプション2: パターンを直接ダウンロード/更新",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "パターン '%s' が見つかりません。\n\nパターンがインストールされていません解決するには:\n • 'fabric --setup'を実行してパターンを設定・ダウンロード\n • または'fabric -U'を実行してパターンをダウンロード/更新",
"pattern_not_found_list_available": "パターン '%s' が見つかりません。'fabric -l'で利用可能なパターンを確認してください",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ 未設定"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "Nenhum %s",
"no_description_available": "Nenhuma descrição disponível",
"i18n_download_failed": "Falha ao baixar tradução para o idioma '%s': %v",
"i18n_load_failed": "Falha ao carregar arquivo de tradução: %v"
"i18n_load_failed": "Falha ao carregar arquivo de tradução: %v",
"setup_welcome_header": "🎉 Bem-vindo ao Fabric! Vamos configurar tudo.",
"setup_step_downloading_patterns": "📥 Passo 1: Baixando padrões (necessários para o Fabric funcionar)...",
"setup_step_downloading_strategies": "📥 Passo 2: Baixando estratégias (necessárias para o Fabric funcionar)...",
"setup_step_configure_ai_provider": "🤖 Passo 3: Configurar um provedor de IA",
"setup_ai_provider_required": "O Fabric precisa de pelo menos um provedor de IA para funcionar.",
"setup_add_more_providers_later": "Você poderá adicionar mais provedores depois com 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Passo 4: Configurando provedor e modelo padrão...",
"setup_complete_header": "✅ Configuração completa! Agora você pode usar o Fabric.",
"setup_next_steps": "Próximos passos:",
"setup_list_patterns": "• Listar padrões disponíveis: fabric -l",
"setup_try_pattern": "• Experimentar um padrão: echo 'seu texto' | fabric --pattern summarize",
"setup_configure_more": "• Configurar mais opções: fabric --setup",
"setup_failed_download_patterns": "falha ao baixar padrões: %w",
"setup_failed_download_strategies": "falha ao baixar estratégias: %w",
"setup_failed_set_defaults": "falha ao configurar provedor e modelo padrão: %w",
"setup_no_ai_provider_selected": "nenhum provedor de IA selecionado - pelo menos um é necessário",
"setup_invalid_selection": "seleção inválida: %s",
"setup_available_ai_providers": "Provedores de IA Disponíveis:",
"setup_enter_ai_provider_number": "Número do Provedor de IA",
"setup_available_plugins": "Plugins disponíveis:",
"setup_plugin_number": "Número do Plugin",
"setup_required_configuration_header": "━━━ CONFIGURAÇÃO OBRIGATÓRIA ━━━\n\nProvedores de IA [pelo menos um obrigatório]",
"setup_required_tools": "Ferramentas Obrigatórias",
"setup_optional_configuration_header": "━━━ CONFIGURAÇÃO OPCIONAL ━━━\n\nFerramentas Opcionais",
"setup_validation_header": "Status da Configuração:",
"setup_validation_ai_provider_configured": "✓ Provedor de IA configurado",
"setup_validation_ai_provider_missing": "✗ Provedor de IA não configurado - Necessário para o Fabric funcionar",
"setup_validation_defaults_configured": "✓ Provedor/modelo padrão definido: %s/%s",
"setup_validation_defaults_missing": "✗ Provedor/modelo padrão não definido - Necessário para o Fabric funcionar",
"setup_validation_patterns_configured": "✓ Padrões baixados",
"setup_validation_patterns_missing": "✗ Padrões não encontrados - Necessários para o Fabric funcionar",
"setup_validation_strategies_configured": "✓ Estratégias baixadas",
"setup_validation_strategies_missing": "✗ Estratégias não encontradas - Necessárias para o Fabric funcionar",
"setup_validation_incomplete_warning": "⚠️ Configuração incompleta! Componentes necessários ausentes.",
"setup_validation_incomplete_help": "Execute 'fabric --setup' novamente para configurar itens faltantes,\nou execute 'fabric -U' para baixar padrões e estratégias.",
"setup_validation_complete": "✓ Todos os componentes necessários estão configurados!",
"patterns_not_found_header": "⚠️ Nenhum padrão encontrado!",
"patterns_required_to_work": "Padrões são necessários para o Fabric funcionar. Para resolver:",
"patterns_option_run_setup": "Opção 1 (Recomendada): Execute a configuração para baixar padrões",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Opção 2: Baixar/atualizar padrões diretamente",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "padrão '%s' não encontrado.\n\nNenhum padrão instalado! Para resolver:\n • Execute 'fabric --setup' para configurar e baixar padrões\n • Ou execute 'fabric -U' para baixar/atualizar padrões diretamente",
"pattern_not_found_list_available": "padrão '%s' não encontrado. Execute 'fabric -l' para ver os padrões disponíveis",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NÃO CONFIGURADO"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "Nenhum %s",
"no_description_available": "Nenhuma descrição disponível",
"i18n_download_failed": "Falha ao descarregar tradução para o idioma '%s': %v",
"i18n_load_failed": "Falha ao carregar ficheiro de tradução: %v"
"i18n_load_failed": "Falha ao carregar ficheiro de tradução: %v",
"setup_welcome_header": "🎉 Bem-vindo ao Fabric! Vamos configurar tudo.",
"setup_step_downloading_patterns": "📥 Passo 1: A descarregar padrões (necessários para o Fabric funcionar)...",
"setup_step_downloading_strategies": "📥 Passo 2: A descarregar estratégias (necessárias para o Fabric funcionar)...",
"setup_step_configure_ai_provider": "🤖 Passo 3: Configurar um fornecedor de IA",
"setup_ai_provider_required": "O Fabric precisa de pelo menos um fornecedor de IA para funcionar.",
"setup_add_more_providers_later": "Poderá adicionar mais fornecedores depois com 'fabric --setup'",
"setup_step_setting_defaults": "⚙️ Passo 4: A configurar fornecedor e modelo predefinido...",
"setup_complete_header": "✅ Configuração completa! Agora pode usar o Fabric.",
"setup_next_steps": "Próximos passos:",
"setup_list_patterns": "• Listar padrões disponíveis: fabric -l",
"setup_try_pattern": "• Experimentar um padrão: echo 'o seu texto' | fabric --pattern summarize",
"setup_configure_more": "• Configurar mais opções: fabric --setup",
"setup_failed_download_patterns": "falha ao descarregar padrões: %w",
"setup_failed_download_strategies": "falha ao descarregar estratégias: %w",
"setup_failed_set_defaults": "falha ao configurar fornecedor e modelo predefinido: %w",
"setup_no_ai_provider_selected": "nenhum fornecedor de IA selecionado - pelo menos um é necessário",
"setup_invalid_selection": "seleção inválida: %s",
"setup_available_ai_providers": "Fornecedores de IA Disponíveis:",
"setup_enter_ai_provider_number": "Número do Fornecedor de IA",
"setup_available_plugins": "Plugins disponíveis:",
"setup_plugin_number": "Número do Plugin",
"setup_required_configuration_header": "━━━ CONFIGURAÇÃO OBRIGATÓRIA ━━━\n\nFornecedores de IA [pelo menos um obrigatório]",
"setup_required_tools": "Ferramentas Obrigatórias",
"setup_optional_configuration_header": "━━━ CONFIGURAÇÃO OPCIONAL ━━━\n\nFerramentas Opcionais",
"setup_validation_header": "Estado da Configuração:",
"setup_validation_ai_provider_configured": "✓ Fornecedor de IA configurado",
"setup_validation_ai_provider_missing": "✗ Fornecedor de IA não configurado - Necessário para o Fabric funcionar",
"setup_validation_defaults_configured": "✓ Fornecedor/modelo predefinido definido: %s/%s",
"setup_validation_defaults_missing": "✗ Fornecedor/modelo predefinido não definido - Necessário para o Fabric funcionar",
"setup_validation_patterns_configured": "✓ Padrões descarregados",
"setup_validation_patterns_missing": "✗ Padrões não encontrados - Necessários para o Fabric funcionar",
"setup_validation_strategies_configured": "✓ Estratégias descarregadas",
"setup_validation_strategies_missing": "✗ Estratégias não encontradas - Necessárias para o Fabric funcionar",
"setup_validation_incomplete_warning": "⚠️ Configuração incompleta! Componentes necessários em falta.",
"setup_validation_incomplete_help": "Execute 'fabric --setup' novamente para configurar itens em falta,\nou execute 'fabric -U' para descarregar padrões e estratégias.",
"setup_validation_complete": "✓ Todos os componentes necessários estão configurados!",
"patterns_not_found_header": "⚠️ Nenhum padrão encontrado!",
"patterns_required_to_work": "Padrões são necessários para o Fabric funcionar. Para resolver:",
"patterns_option_run_setup": "Opção 1 (Recomendada): Execute a configuração para descarregar padrões",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "Opção 2: Descarregar/atualizar padrões diretamente",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "padrão '%s' não encontrado.\n\nNenhum padrão instalado! Para resolver:\n • Execute 'fabric --setup' para configurar e descarregar padrões\n • Ou execute 'fabric -U' para descarregar/atualizar padrões diretamente",
"pattern_not_found_list_available": "padrão '%s' não encontrado. Execute 'fabric -l' para ver os padrões disponíveis",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ NÃO CONFIGURADO"
}

View File

@@ -161,5 +161,51 @@
"no_items_found": "没有 %s",
"no_description_available": "没有可用描述",
"i18n_download_failed": "下载语言 '%s' 的翻译失败: %v",
"i18n_load_failed": "加载翻译文件失败: %v"
"i18n_load_failed": "加载翻译文件失败: %v",
"setup_welcome_header": "🎉 欢迎使用 Fabric让我们开始设置。",
"setup_step_downloading_patterns": "📥 步骤 1正在下载模式Fabric 运行所需)...",
"setup_step_downloading_strategies": "📥 步骤 2正在下载策略Fabric 运行所需)...",
"setup_step_configure_ai_provider": "🤖 步骤 3配置 AI 提供商",
"setup_ai_provider_required": "Fabric 需要至少一个 AI 提供商才能运行。",
"setup_add_more_providers_later": "您可以稍后通过 'fabric --setup' 添加更多提供商",
"setup_step_setting_defaults": "⚙️ 步骤 4正在设置默认提供商和模型...",
"setup_complete_header": "✅ 设置完成!您现在可以使用 Fabric 了。",
"setup_next_steps": "下一步:",
"setup_list_patterns": "• 列出可用模式fabric -l",
"setup_try_pattern": "• 尝试一个模式echo '您的文本' | fabric --pattern summarize",
"setup_configure_more": "• 配置更多设置fabric --setup",
"setup_failed_download_patterns": "下载模式失败:%w",
"setup_failed_download_strategies": "下载策略失败:%w",
"setup_failed_set_defaults": "设置默认提供商和模型失败:%w",
"setup_no_ai_provider_selected": "未选择 AI 提供商 - 至少需要一个",
"setup_invalid_selection": "无效的选择:%s",
"setup_available_ai_providers": "可用的 AI 提供商:",
"setup_enter_ai_provider_number": "AI 提供商编号",
"setup_available_plugins": "可用的插件:",
"setup_plugin_number": "插件编号",
"setup_required_configuration_header": "━━━ 必需配置 ━━━\n\nAI 提供商 [至少需要一个]",
"setup_required_tools": "必需工具",
"setup_optional_configuration_header": "━━━ 可选配置 ━━━\n\n可选工具",
"setup_validation_header": "配置状态:",
"setup_validation_ai_provider_configured": "✓ AI 提供商已配置",
"setup_validation_ai_provider_missing": "✗ AI 提供商未配置 - Fabric 运行所需",
"setup_validation_defaults_configured": "✓ 默认提供商/模型已设置:%s/%s",
"setup_validation_defaults_missing": "✗ 默认提供商/模型未设置 - Fabric 运行所需",
"setup_validation_patterns_configured": "✓ 模式已下载",
"setup_validation_patterns_missing": "✗ 未找到模式 - Fabric 运行所需",
"setup_validation_strategies_configured": "✓ 策略已下载",
"setup_validation_strategies_missing": "✗ 未找到策略 - Fabric 运行所需",
"setup_validation_incomplete_warning": "⚠️ 设置不完整!缺少必需组件。",
"setup_validation_incomplete_help": "再次运行 'fabric --setup' 配置缺失项,\n或运行 'fabric -U' 下载模式和策略。",
"setup_validation_complete": "✓ 所有必需组件已配置!",
"patterns_not_found_header": "⚠️ 未找到模式!",
"patterns_required_to_work": "Fabric 需要模式才能运行。要解决此问题:",
"patterns_option_run_setup": "选项 1推荐运行设置以下载模式",
"patterns_option_run_setup_command": "fabric --setup",
"patterns_option_run_update": "选项 2直接下载/更新模式",
"patterns_option_run_update_command": "fabric -U",
"pattern_not_found_no_patterns": "未找到模式 '%s'。\n\n未安装任何模式要解决此问题\n • 运行 'fabric --setup' 配置并下载模式\n • 或运行 'fabric -U' 直接下载/更新模式",
"pattern_not_found_list_available": "未找到模式 '%s'。运行 'fabric -l' 查看可用模式",
"plugin_configured": " ✓",
"plugin_not_configured": " ⚠️ 未配置"
}

View File

@@ -51,7 +51,7 @@ func LevelFromInt(i int) Level {
}
// Debug writes a debug message if the global level permits.
func Debug(l Level, format string, a ...interface{}) {
func Debug(l Level, format string, a ...any) {
mu.RLock()
current := level
w := output
@@ -63,7 +63,7 @@ func Debug(l Level, format string, a ...interface{}) {
// Log writes a message unconditionally to stderr.
// This is for important messages that should always be shown regardless of debug level.
func Log(format string, a ...interface{}) {
func Log(format string, a ...any) {
mu.RLock()
w := output
mu.RUnlock()
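
The only change in this file is cosmetic: since Go 1.18, any is a type alias for interface{}, so the new Debug/Log signatures are identical to the old ones and no caller needs to change. A tiny sketch of the equivalence:

```go
package main

import "fmt"

// Since Go 1.18, `any` is an alias for `interface{}`; the two spellings
// name the same type, so the signature change above is source-compatible.
func logf(format string, a ...any) {
	fmt.Printf(format, a...)
}

func main() {
	var old interface{} = "hello"
	var alias any = old // assignable both ways: it is the same type
	logf("old=%v alias=%v\n", old, alias)
}
```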

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"net/http"
"os"
"strconv"
"strings"
@@ -50,6 +51,10 @@ func NewClient() (ret *Client) {
string(anthropic.ModelClaudeOpus4_1_20250805),
string(anthropic.ModelClaudeSonnet4_5),
string(anthropic.ModelClaudeSonnet4_5_20250929),
string(anthropic.ModelClaudeOpus4_5_20251101),
string(anthropic.ModelClaudeOpus4_5),
string(anthropic.ModelClaudeHaiku4_5),
string(anthropic.ModelClaudeHaiku4_5_20251001),
}
ret.modelBetas = map[string][]string{
@@ -212,7 +217,7 @@ func (an *Client) SendStream(
}
if stream.Err() != nil {
fmt.Printf("Messages stream error: %v\n", stream.Err())
fmt.Fprintf(os.Stderr, "Messages stream error: %v\n", stream.Err())
}
close(channel)
return
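
Besides registering the newer Claude model IDs, this diff reroutes stream errors from stdout to stderr, so diagnostics no longer mix with piped model output. A minimal sketch of that convention (the error text and values are illustrative only):

```go
package main

import (
	"errors"
	"fmt"
	"os"
)

func main() {
	// Streamed model output belongs on stdout so it can be piped further...
	fmt.Fprintln(os.Stdout, "normal streamed content")

	// ...while diagnostics go to stderr, as in the change above.
	if err := errors.New("connection reset"); err != nil {
		fmt.Fprintf(os.Stderr, "Messages stream error: %v\n", err)
	}
}
```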

View File

@@ -52,7 +52,7 @@ func createExpiredToken(accessToken, refreshToken string) *util.OAuthToken {
}
// mockTokenServer creates a mock OAuth token server for testing
func mockTokenServer(_ *testing.T, responses map[string]interface{}) *httptest.Server {
func mockTokenServer(_ *testing.T, responses map[string]any) *httptest.Server {
return httptest.NewServer(http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
if r.URL.Path != "/v1/oauth/token" {
http.NotFound(w, r)
@@ -80,7 +80,7 @@ func mockTokenServer(_ *testing.T, responses map[string]interface{}) *httptest.S
w.Header().Set("Content-Type", "application/json")
if errorResp, ok := response.(map[string]interface{}); ok && errorResp["error"] != nil {
if errorResp, ok := response.(map[string]any); ok && errorResp["error"] != nil {
w.WriteHeader(http.StatusBadRequest)
}
@@ -114,8 +114,8 @@ func TestGeneratePKCE(t *testing.T) {
func TestExchangeToken_Success(t *testing.T) {
// Create mock server
server := mockTokenServer(t, map[string]interface{}{
"authorization_code": map[string]interface{}{
server := mockTokenServer(t, map[string]any{
"authorization_code": map[string]any{
"access_token": "test_access_token",
"refresh_token": "test_refresh_token",
"expires_in": 3600,
@@ -161,8 +161,8 @@ func TestRefreshToken_Success(t *testing.T) {
os.WriteFile(tokenPath, data, 0600)
// Create mock server for refresh
server := mockTokenServer(t, map[string]interface{}{
"refresh_token": map[string]interface{}{
server := mockTokenServer(t, map[string]any{
"refresh_token": map[string]any{
"access_token": "new_access_token",
"refresh_token": "new_refresh_token",
"expires_in": 3600,
@@ -416,7 +416,7 @@ func TestGetValidTokenWithValidToken(t *testing.T) {
// Benchmark tests
func BenchmarkGeneratePKCE(b *testing.B) {
for i := 0; i < b.N; i++ {
for b.Loop() {
_, _, err := generatePKCE()
if err != nil {
b.Fatal(err)
@@ -427,8 +427,7 @@ func BenchmarkGeneratePKCE(b *testing.B) {
func BenchmarkTokenIsExpired(b *testing.B) {
token := createTestToken("access", "refresh", 3600)
b.ResetTimer()
for i := 0; i < b.N; i++ {
for b.Loop() {
token.IsExpired(5)
}
}
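
The benchmarks switch from the classic `for i := 0; i < b.N; i++` counter to `for b.Loop()`, the form introduced in Go 1.24: it runs the body the right number of times, makes an explicit b.ResetTimer() unnecessary in simple cases, and is documented to keep the measured work from being optimized away. A hedged sketch — the package name and workload are made up for illustration, and Go 1.24+ is required:

```go
// Hypothetical package; the real benchmarks live next to the OAuth code.
package bench_test

import (
	"crypto/sha256"
	"testing"
)

// Go 1.24 style used in the diff above: b.Loop() reports whether the
// benchmark should keep iterating and replaces the manual b.N counter.
func BenchmarkSHA256(b *testing.B) {
	data := []byte("some fixed input")
	for b.Loop() {
		sha256.Sum256(data)
	}
}
```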

View File

@@ -3,6 +3,7 @@ package gemini
import (
"fmt"
"sort"
"strings"
)
// GeminiVoice represents a Gemini TTS voice with its characteristics
@@ -126,16 +127,17 @@ func ListGeminiVoices(shellCompleteMode bool) string {
if shellCompleteMode {
// For shell completion, just return voice names
names := GetGeminiVoiceNames()
result := ""
var result strings.Builder
for _, name := range names {
result += name + "\n"
result.WriteString(name + "\n")
}
return result
return result.String()
}
// For human-readable output
voices := GetGeminiVoices()
result := "Available Gemini Text-to-Speech voices:\n\n"
var result strings.Builder
result.WriteString("Available Gemini Text-to-Speech voices:\n\n")
// Group by characteristics for better readability
groups := map[string][]GeminiVoice{
@@ -186,22 +188,22 @@ func ListGeminiVoices(shellCompleteMode bool) string {
// Output grouped voices
for groupName, groupVoices := range groups {
if len(groupVoices) > 0 {
result += fmt.Sprintf("%s:\n", groupName)
result.WriteString(fmt.Sprintf("%s:\n", groupName))
for _, voice := range groupVoices {
defaultStr := ""
if voice.Name == "Kore" {
defaultStr = " (default)"
}
result += fmt.Sprintf(" %-15s - %s%s\n", voice.Name, voice.Description, defaultStr)
result.WriteString(fmt.Sprintf(" %-15s - %s%s\n", voice.Name, voice.Description, defaultStr))
}
result += "\n"
result.WriteString("\n")
}
}
result += "Use --voice <voice_name> to select a specific voice.\n"
result += "Example: fabric --voice Charon -m gemini-2.5-flash-preview-tts -o output.wav \"Hello world\"\n"
result.WriteString("Use --voice <voice_name> to select a specific voice.\n")
result.WriteString("Example: fabric --voice Charon -m gemini-2.5-flash-preview-tts -o output.wav \"Hello world\"\n")
return result
return result.String()
}
// NOTE: This implementation maintains a curated list based on official Google documentation.
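
The refactor replaces repeated string concatenation, which reallocates the whole string on every `+=`, with a strings.Builder that appends into one growing buffer. A small sketch of the same pattern (the voice names are just the examples mentioned in this changeset):

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Voice names here are illustrative; the real list is curated in voices.go.
	names := []string{"Kore", "Charon", "Puck"}

	var b strings.Builder
	b.WriteString("Available Gemini Text-to-Speech voices:\n\n")
	for _, n := range names {
		// WriteString/Fprintf append into one buffer instead of
		// re-allocating the whole string as `result += ...` did.
		fmt.Fprintf(&b, "  %-15s\n", n)
	}
	fmt.Print(b.String())
}
```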

View File

@@ -90,7 +90,7 @@ func (c *Client) ListModels() ([]string, error) {
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
url := fmt.Sprintf("%s/chat/completions", c.ApiUrl.Value)
payload := map[string]interface{}{
payload := map[string]any{
"messages": msgs,
"model": opts.Model,
"stream": true, // Enable streaming
@@ -140,27 +140,27 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
continue
}
if bytes.HasPrefix(line, []byte("data: ")) {
line = bytes.TrimPrefix(line, []byte("data: "))
if after, ok := bytes.CutPrefix(line, []byte("data: ")); ok {
line = after
}
if string(line) == "[DONE]" {
break
}
var result map[string]interface{}
var result map[string]any
if err = json.Unmarshal(line, &result); err != nil {
continue
}
var choices []interface{}
var choices []any
var ok bool
if choices, ok = result["choices"].([]interface{}); !ok || len(choices) == 0 {
if choices, ok = result["choices"].([]any); !ok || len(choices) == 0 {
continue
}
var delta map[string]interface{}
if delta, ok = choices[0].(map[string]interface{})["delta"].(map[string]interface{}); !ok {
var delta map[string]any
if delta, ok = choices[0].(map[string]any)["delta"].(map[string]any); !ok {
continue
}
@@ -176,7 +176,7 @@ func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (content string, err error) {
url := fmt.Sprintf("%s/chat/completions", c.ApiUrl.Value)
payload := map[string]interface{}{
payload := map[string]any{
"messages": msgs,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
@@ -208,21 +208,21 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
return
}
var result map[string]interface{}
var result map[string]any
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
err = fmt.Errorf("failed to decode response: %w", err)
return
}
var choices []interface{}
var choices []any
var ok bool
if choices, ok = result["choices"].([]interface{}); !ok || len(choices) == 0 {
if choices, ok = result["choices"].([]any); !ok || len(choices) == 0 {
err = fmt.Errorf("invalid response format: missing or empty choices")
return
}
var message map[string]interface{}
if message, ok = choices[0].(map[string]interface{})["message"].(map[string]interface{}); !ok {
var message map[string]any
if message, ok = choices[0].(map[string]any)["message"].(map[string]any); !ok {
err = fmt.Errorf("invalid response format: missing message in first choice")
return
}
@@ -238,7 +238,7 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
func (c *Client) Complete(ctx context.Context, prompt string, opts *domain.ChatOptions) (text string, err error) {
url := fmt.Sprintf("%s/completions", c.ApiUrl.Value)
payload := map[string]interface{}{
payload := map[string]any{
"prompt": prompt,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
@@ -270,20 +270,20 @@ func (c *Client) Complete(ctx context.Context, prompt string, opts *domain.ChatO
return
}
var result map[string]interface{}
var result map[string]any
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
err = fmt.Errorf("failed to decode response: %w", err)
return
}
var choices []interface{}
var choices []any
var ok bool
if choices, ok = result["choices"].([]interface{}); !ok || len(choices) == 0 {
if choices, ok = result["choices"].([]any); !ok || len(choices) == 0 {
err = fmt.Errorf("invalid response format: missing or empty choices")
return
}
if text, ok = choices[0].(map[string]interface{})["text"].(string); !ok {
if text, ok = choices[0].(map[string]any)["text"].(string); !ok {
err = fmt.Errorf("invalid response format: missing or non-string text in first choice")
return
}
@@ -294,7 +294,7 @@ func (c *Client) Complete(ctx context.Context, prompt string, opts *domain.ChatO
func (c *Client) GetEmbeddings(ctx context.Context, input string, opts *domain.ChatOptions) (embeddings []float64, err error) {
url := fmt.Sprintf("%s/embeddings", c.ApiUrl.Value)
payload := map[string]interface{}{
payload := map[string]any{
"input": input,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
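
Two mechanical cleanups recur through this file: map[string]interface{} becomes map[string]any, and the HasPrefix/TrimPrefix pair on streamed lines becomes a single bytes.CutPrefix call (Go 1.20+), which tests for and strips the "data: " prefix in one step. A minimal sketch of the CutPrefix pattern, using made-up SSE lines:

```go
package main

import (
	"bytes"
	"fmt"
)

func main() {
	// Made-up SSE lines; real payloads come from the streaming endpoint.
	lines := [][]byte{
		[]byte(`data: {"choices":[{"delta":{"content":"Hi"}}]}`),
		[]byte(": keep-alive comment"),
		[]byte("data: [DONE]"),
	}

	for _, line := range lines {
		// One call both checks for and strips the "data: " prefix.
		after, ok := bytes.CutPrefix(line, []byte("data: "))
		if !ok {
			continue
		}
		if string(after) == "[DONE]" {
			fmt.Println("stream finished")
			break
		}
		fmt.Printf("payload: %s\n", after)
	}
}
```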

View File

@@ -17,6 +17,35 @@ type VendorsModels struct {
*util.GroupsItemsSelectorString
}
// FilterByVendor returns a new VendorsModels containing only the specified vendor's models.
// Vendor matching is case-insensitive (e.g., "OpenAI", "openai", and "OPENAI" all match).
// If the vendor is not found, an empty VendorsModels is returned.
func (o *VendorsModels) FilterByVendor(vendor string) *VendorsModels {
filtered := NewVendorsModels()
for _, groupItems := range o.GroupsItems {
if strings.EqualFold(groupItems.Group, vendor) {
filtered.AddGroupItems(groupItems.Group, groupItems.Items...)
break
}
}
return filtered
}
// FindModelNameCaseInsensitive returns the actual model name from available models,
// matching case-insensitively. Returns empty string if not found.
// For example, if the available models contain "gpt-4o" and user queries "GPT-4O",
// this returns "gpt-4o" (the actual model name that should be sent to the API).
func (o *VendorsModels) FindModelNameCaseInsensitive(modelQuery string) string {
for _, groupItems := range o.GroupsItems {
for _, item := range groupItems.Items {
if strings.EqualFold(item, modelQuery) {
return item
}
}
}
return ""
}
// PrintWithVendor prints models including their vendor on each line.
// When shellCompleteList is true, output is suitable for shell completion.
// Default vendor and model are highlighted with an asterisk.
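
Both new helpers lean on strings.EqualFold: the query is matched case-insensitively, but the value returned is the canonical spelling stored in the list, which is what should be sent to the provider API. The tests in the next file exercise the real methods; below is only a tiny illustration of the EqualFold behavior with invented model names:

```go
package main

import (
	"fmt"
	"strings"
)

func main() {
	// Invented model list; the real one comes from the configured vendors.
	available := []string{"gpt-4o", "claude-3-opus"}
	query := "GPT-4O"

	for _, m := range available {
		if strings.EqualFold(m, query) {
			// The canonical stored spelling is what goes to the API.
			fmt.Println("send to API as:", m) // prints "gpt-4o"
		}
	}
}
```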

View File

@@ -19,19 +19,19 @@ func TestNewVendorsModels(t *testing.T) {
func TestFindVendorsByModelFirst(t *testing.T) {
vendors := NewVendorsModels()
vendors.AddGroupItems("vendor1", []string{"model1", "model2"}...)
vendors.AddGroupItems("Vendor1", []string{"Model1", "model2"}...)
vendor := vendors.FindGroupsByItemFirst("model1")
if vendor != "vendor1" {
t.Fatalf("FindVendorsByModelFirst() = %v, want %v", vendor, "vendor1")
if vendor != "Vendor1" {
t.Fatalf("FindVendorsByModelFirst() = %v, want %v", vendor, "Vendor1")
}
}
func TestFindVendorsByModel(t *testing.T) {
vendors := NewVendorsModels()
vendors.AddGroupItems("vendor1", []string{"model1", "model2"}...)
foundVendors := vendors.FindGroupsByItem("model1")
if len(foundVendors) != 1 || foundVendors[0] != "vendor1" {
t.Fatalf("FindVendorsByModel() = %v, want %v", foundVendors, []string{"vendor1"})
vendors.AddGroupItems("Vendor1", []string{"Model1", "model2"}...)
foundVendors := vendors.FindGroupsByItem("MODEL1")
if len(foundVendors) != 1 || foundVendors[0] != "Vendor1" {
t.Fatalf("FindVendorsByModel() = %v, want %v", foundVendors, []string{"Vendor1"})
}
}
@@ -54,3 +54,51 @@ func TestPrintWithVendorMarksDefault(t *testing.T) {
t.Fatalf("default model not marked: %s", out)
}
}
func TestFilterByVendorCaseInsensitive(t *testing.T) {
vendors := NewVendorsModels()
vendors.AddGroupItems("vendor1", []string{"model1"}...)
vendors.AddGroupItems("vendor2", []string{"model2"}...)
filtered := vendors.FilterByVendor("VENDOR2")
if len(filtered.GroupsItems) != 1 {
t.Fatalf("expected 1 vendor group, got %d", len(filtered.GroupsItems))
}
if filtered.GroupsItems[0].Group != "vendor2" {
t.Fatalf("expected vendor2, got %s", filtered.GroupsItems[0].Group)
}
if len(filtered.GroupsItems[0].Items) != 1 || filtered.GroupsItems[0].Items[0] != "model2" {
t.Fatalf("unexpected models for vendor2: %v", filtered.GroupsItems[0].Items)
}
}
func TestFindModelNameCaseInsensitive(t *testing.T) {
vendors := NewVendorsModels()
vendors.AddGroupItems("OpenAI", []string{"gpt-4o", "gpt-5"}...)
vendors.AddGroupItems("Anthropic", []string{"claude-3-opus"}...)
tests := []struct {
name string
query string
expectedModel string
}{
{"exact match lowercase", "gpt-4o", "gpt-4o"},
{"uppercase query", "GPT-4O", "gpt-4o"},
{"mixed case query", "GpT-5", "gpt-5"},
{"exact match with hyphens", "claude-3-opus", "claude-3-opus"},
{"uppercase with hyphens", "CLAUDE-3-OPUS", "claude-3-opus"},
{"non-existent model", "gpt-999", ""},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := vendors.FindModelNameCaseInsensitive(tt.query)
if result != tt.expectedModel {
t.Errorf("FindModelNameCaseInsensitive(%q) = %q, want %q", tt.query, result, tt.expectedModel)
}
})
}
}

View File

@@ -2,7 +2,9 @@ package ollama
import (
"context"
"encoding/base64"
"fmt"
"io"
"net/http"
"net/url"
"os"
@@ -10,11 +12,10 @@ import (
"time"
"github.com/danielmiessler/fabric/internal/chat"
ollamaapi "github.com/ollama/ollama/api"
"github.com/samber/lo"
"github.com/danielmiessler/fabric/internal/domain"
debuglog "github.com/danielmiessler/fabric/internal/log"
"github.com/danielmiessler/fabric/internal/plugins"
ollamaapi "github.com/ollama/ollama/api"
)
const defaultBaseUrl = "http://localhost:11434"
@@ -48,6 +49,7 @@ type Client struct {
apiUrl *url.URL
client *ollamaapi.Client
ApiHttpTimeout *plugins.SetupQuestion
httpClient *http.Client
}
type transport_sec struct {
@@ -84,7 +86,8 @@ func (o *Client) configure() (err error) {
}
}
o.client = ollamaapi.NewClient(o.apiUrl, &http.Client{Timeout: timeout, Transport: &transport_sec{underlyingTransport: http.DefaultTransport, ApiKey: o.ApiKey}})
o.httpClient = &http.Client{Timeout: timeout, Transport: &transport_sec{underlyingTransport: http.DefaultTransport, ApiKey: o.ApiKey}}
o.client = ollamaapi.NewClient(o.apiUrl, o.httpClient)
return
}
@@ -104,15 +107,18 @@ func (o *Client) ListModels() (ret []string, err error) {
}
func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) (err error) {
req := o.createChatRequest(msgs, opts)
ctx := context.Background()
var req ollamaapi.ChatRequest
if req, err = o.createChatRequest(ctx, msgs, opts); err != nil {
return
}
respFunc := func(resp ollamaapi.ChatResponse) (streamErr error) {
channel <- resp.Message.Content
return
}
ctx := context.Background()
if err = o.client.Chat(ctx, &req, respFunc); err != nil {
return
}
@@ -124,7 +130,10 @@ func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.Cha
func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (ret string, err error) {
bf := false
req := o.createChatRequest(msgs, opts)
var req ollamaapi.ChatRequest
if req, err = o.createChatRequest(ctx, msgs, opts); err != nil {
return
}
req.Stream = &bf
respFunc := func(resp ollamaapi.ChatResponse) (streamErr error) {
@@ -133,17 +142,20 @@ func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
}
if err = o.client.Chat(ctx, &req, respFunc); err != nil {
fmt.Printf("FRED --> %s\n", err)
debuglog.Debug(debuglog.Basic, "Ollama chat request failed: %v\n", err)
}
return
}
func (o *Client) createChatRequest(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (ret ollamaapi.ChatRequest) {
messages := lo.Map(msgs, func(message *chat.ChatCompletionMessage, _ int) (ret ollamaapi.Message) {
return ollamaapi.Message{Role: message.Role, Content: message.Content}
})
func (o *Client) createChatRequest(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions) (ret ollamaapi.ChatRequest, err error) {
messages := make([]ollamaapi.Message, len(msgs))
for i, message := range msgs {
if messages[i], err = o.convertMessage(ctx, message); err != nil {
return
}
}
options := map[string]interface{}{
options := map[string]any{
"temperature": opts.Temperature,
"presence_penalty": opts.PresencePenalty,
"frequency_penalty": opts.FrequencyPenalty,
@@ -162,14 +174,85 @@ func (o *Client) createChatRequest(msgs []*chat.ChatCompletionMessage, opts *dom
return
}
func (o *Client) convertMessage(ctx context.Context, message *chat.ChatCompletionMessage) (ret ollamaapi.Message, err error) {
ret = ollamaapi.Message{Role: message.Role, Content: message.Content}
if len(message.MultiContent) == 0 {
return
}
// Pre-allocate with capacity hint
textParts := make([]string, 0, len(message.MultiContent))
if strings.TrimSpace(ret.Content) != "" {
textParts = append(textParts, strings.TrimSpace(ret.Content))
}
for _, part := range message.MultiContent {
switch part.Type {
case chat.ChatMessagePartTypeText:
if trimmed := strings.TrimSpace(part.Text); trimmed != "" {
textParts = append(textParts, trimmed)
}
case chat.ChatMessagePartTypeImageURL:
// Nil guard
if part.ImageURL == nil || part.ImageURL.URL == "" {
continue
}
var img []byte
if img, err = o.loadImageBytes(ctx, part.ImageURL.URL); err != nil {
return
}
ret.Images = append(ret.Images, ollamaapi.ImageData(img))
}
}
ret.Content = strings.Join(textParts, "\n")
return
}
func (o *Client) loadImageBytes(ctx context.Context, imageURL string) (ret []byte, err error) {
// Handle data URLs (base64 encoded)
if strings.HasPrefix(imageURL, "data:") {
parts := strings.SplitN(imageURL, ",", 2)
if len(parts) != 2 {
err = fmt.Errorf("invalid data URL format")
return
}
if ret, err = base64.StdEncoding.DecodeString(parts[1]); err != nil {
err = fmt.Errorf("failed to decode data URL: %w", err)
}
return
}
// Handle HTTP URLs with context
var req *http.Request
if req, err = http.NewRequestWithContext(ctx, http.MethodGet, imageURL, nil); err != nil {
return
}
var resp *http.Response
if resp, err = o.httpClient.Do(req); err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode >= http.StatusBadRequest {
err = fmt.Errorf("failed to fetch image %s: %s", imageURL, resp.Status)
return
}
ret, err = io.ReadAll(resp.Body)
return
}
func (o *Client) NeedsRawMode(modelName string) bool {
ollamaPrefixes := []string{
ollamaSearchStrings := []string{
"llama3",
"llama2",
"mistral",
}
for _, prefix := range ollamaPrefixes {
if strings.HasPrefix(modelName, prefix) {
for _, searchString := range ollamaSearchStrings {
if strings.Contains(modelName, searchString) {
return true
}
}
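As a reading aid, this is a self-contained sketch of the data-URL branch of loadImageBytes above; the HTTP branch simply issues a GET through the client's timeout-configured http.Client and rejects any status at or above 400.

package main

import (
	"encoding/base64"
	"fmt"
	"strings"
)

// decodeDataURL mirrors the data-URL handling above: split
// "data:<mediatype>;base64,<payload>" on the first comma and decode the payload.
func decodeDataURL(imageURL string) ([]byte, error) {
	parts := strings.SplitN(imageURL, ",", 2)
	if len(parts) != 2 {
		return nil, fmt.Errorf("invalid data URL format")
	}
	return base64.StdEncoding.DecodeString(parts[1])
}

func main() {
	img, err := decodeDataURL("data:image/png;base64,aGVsbG8=")
	fmt.Println(len(img), err) // 5 <nil>
}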

View File

@@ -172,10 +172,11 @@ func (o *Client) supportsResponsesAPI() bool {
func (o *Client) NeedsRawMode(modelName string) bool {
openaiModelsPrefixes := []string{
"glm",
"gpt-5",
"o1",
"o3",
"o4",
"gpt-5",
}
openAIModelsNeedingRaw := []string{
"gpt-4o-mini-search-preview",

View File

@@ -8,6 +8,7 @@ import (
"fmt"
"os"
"path/filepath"
"slices"
"strings"
"github.com/danielmiessler/fabric/internal/domain"
@@ -31,12 +32,7 @@ var ImageGenerationSupportedModels = []string{
// supportsImageGeneration checks if the given model supports the image_generation tool
func supportsImageGeneration(model string) bool {
for _, supportedModel := range ImageGenerationSupportedModels {
if model == supportedModel {
return true
}
}
return false
return slices.Contains(ImageGenerationSupportedModels, model)
}
// getOutputFormatFromExtension determines the API output format based on file extension

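A quick illustration of the slices.Contains change above; the model names here are placeholders, not the real ImageGenerationSupportedModels entries.

package main

import (
	"fmt"
	"slices"
)

// slices.Contains (Go 1.21+) replaces the hand-written membership loop.
func main() {
	supported := []string{"example-image-model-a", "example-image-model-b"} // placeholder names
	fmt.Println(slices.Contains(supported, "example-image-model-b")) // true
	fmt.Println(slices.Contains(supported, "other-model"))           // false
}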
View File

@@ -345,7 +345,7 @@ func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
tests := []struct {
name string
opts *domain.ChatOptions
expected map[string]interface{}
expected map[string]any
}{
{
name: "All parameters specified",
@@ -356,7 +356,7 @@ func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
ImageBackground: "transparent",
ImageCompression: 0, // Not applicable for PNG
},
expected: map[string]interface{}{
expected: map[string]any{
"size": "1536x1024",
"quality": "high",
"background": "transparent",
@@ -372,7 +372,7 @@ func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
ImageBackground: "opaque",
ImageCompression: 75,
},
expected: map[string]interface{}{
expected: map[string]any{
"size": "1024x1024",
"quality": "medium",
"background": "opaque",
@@ -386,7 +386,7 @@ func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
ImageFile: "/tmp/test.webp",
ImageQuality: "low",
},
expected: map[string]interface{}{
expected: map[string]any{
"quality": "low",
"output_format": "webp",
},
@@ -396,7 +396,7 @@ func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
opts: &domain.ChatOptions{
ImageFile: "/tmp/test.png",
},
expected: map[string]interface{}{
expected: map[string]any{
"output_format": "png",
},
},

View File

@@ -16,7 +16,7 @@ func TestBuildResponseRequestWithMaxTokens(t *testing.T) {
var msgs []*chat.ChatCompletionMessage
for i := 0; i < 2; i++ {
for range 2 {
msgs = append(msgs, &chat.ChatCompletionMessage{
Role: "User",
Content: "My msg",
@@ -42,7 +42,7 @@ func TestBuildResponseRequestNoMaxTokens(t *testing.T) {
var msgs []*chat.ChatCompletionMessage
for i := 0; i < 2; i++ {
for range 2 {
msgs = append(msgs, &chat.ChatCompletionMessage{
Role: "User",
Content: "My msg",

View File

@@ -2,6 +2,7 @@ package openai_compatible
import (
"context"
"fmt"
"os"
"strings"
@@ -38,8 +39,12 @@ func NewClient(providerConfig ProviderConfig) *Client {
// ListModels overrides the default ListModels to handle different response formats
func (c *Client) ListModels() ([]string, error) {
// If a custom models URL is provided, use direct fetch with that URL
// If a custom models URL is provided, handle it
if c.modelsURL != "" {
// Check for static model list
if strings.HasPrefix(c.modelsURL, "static:") {
return c.getStaticModels(c.modelsURL)
}
// TODO: Handle context properly in Fabric by accepting and propagating a context.Context
// instead of creating a new one here.
return openai.FetchModelsDirectly(context.Background(), c.modelsURL, c.Client.ApiKey.Value, c.GetName())
@@ -55,6 +60,68 @@ func (c *Client) ListModels() ([]string, error) {
return c.DirectlyGetModels(context.Background())
}
// getStaticModels returns a predefined list of models for providers that don't support model discovery
func (c *Client) getStaticModels(modelsKey string) ([]string, error) {
switch modelsKey {
case "static:abacus":
return []string{
"route-llm",
"gpt-4o-2024-11-20",
"gpt-4o-mini",
"o4-mini",
"o3-pro",
"o3",
"o3-mini",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"gpt-5",
"gpt-5-mini",
"gpt-5-nano",
"gpt-5.1",
"gpt-5.1-chat-latest",
"openai/gpt-oss-120b",
"claude-3-7-sonnet-20250219",
"claude-sonnet-4-20250514",
"claude-opus-4-20250514",
"claude-opus-4-1-20250805",
"claude-sonnet-4-5-20250929",
"claude-haiku-4-5-20251001",
"claude-opus-4-5-20251101",
"meta-llama/Llama-4-Maverick-17B-128E-Instruct-FP8",
"meta-llama/Meta-Llama-3.1-405B-Instruct-Turbo",
"meta-llama/Meta-Llama-3.1-70B-Instruct",
"meta-llama/Meta-Llama-3.1-8B-Instruct",
"llama-3.3-70b-versatile",
"gemini-2.0-flash-001",
"gemini-2.0-pro-exp-02-05",
"gemini-2.5-pro",
"gemini-2.5-flash",
"gemini-3-pro-preview",
"qwen-2.5-coder-32b",
"Qwen/Qwen2.5-72B-Instruct",
"Qwen/QwQ-32B",
"Qwen/Qwen3-235B-A22B-Instruct-2507",
"Qwen/Qwen3-32B",
"qwen/qwen3-coder-480b-a35b-instruct",
"qwen/qwen3-Max",
"grok-4-0709",
"grok-4-fast-non-reasoning",
"grok-4-1-fast-non-reasoning",
"grok-code-fast-1",
"kimi-k2-turbo-preview",
"deepseek/deepseek-v3.1",
"deepseek-ai/DeepSeek-V3.1-Terminus",
"deepseek-ai/DeepSeek-R1",
"deepseek-ai/DeepSeek-V3.2",
"zai-org/glm-4.5",
"zai-org/glm-4.6",
}, nil
default:
return nil, fmt.Errorf("unknown static model list: %s", modelsKey)
}
}
// ProviderMap is a map of provider name to ProviderConfig for O(1) lookup
var ProviderMap = map[string]ProviderConfig{
"AIML": {
@@ -123,6 +190,17 @@ var ProviderMap = map[string]ProviderConfig{
BaseURL: "https://api.venice.ai/api/v1",
ImplementsResponses: false,
},
"Z AI": {
Name: "Z AI",
BaseURL: "https://api.z.ai/api/paas/v4",
ImplementsResponses: false,
},
"Abacus": {
Name: "Abacus",
BaseURL: "https://routellm.abacus.ai/v1/",
ModelsURL: "static:abacus", // Special marker for static model list
ImplementsResponses: false,
},
}
// GetProviderByName returns the provider configuration for a given name with O(1) lookup

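To make the new "static:" marker easier to follow, here is a trimmed sketch of the control flow: the marker is detected by prefix, mapped to a hard-coded list, and unknown keys produce an error. Names and the model list are abbreviated from the hunk above, not the full implementation.

package main

import (
	"fmt"
	"strings"
)

// listStaticModels sketches how a "static:" ModelsURL short-circuits model
// discovery: no HTTP call is made, the list is returned directly.
func listStaticModels(modelsURL string) ([]string, error) {
	if !strings.HasPrefix(modelsURL, "static:") {
		return nil, fmt.Errorf("not a static model list: %s", modelsURL)
	}
	switch modelsURL {
	case "static:abacus":
		// abbreviated; the real list in the diff above is much longer
		return []string{"route-llm", "gpt-5", "claude-opus-4-5-20251101"}, nil
	default:
		return nil, fmt.Errorf("unknown static model list: %s", modelsURL)
	}
}

func main() {
	models, err := listStaticModels("static:abacus")
	fmt.Println(len(models), err) // 3 <nil>
}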
View File

@@ -20,6 +20,16 @@ func TestCreateClient(t *testing.T) {
provider: "Groq",
exists: true,
},
{
name: "Existing provider - Z AI",
provider: "Z AI",
exists: true,
},
{
name: "Existing provider - Abacus",
provider: "Abacus",
exists: true,
},
{
name: "Non-existent provider",
provider: "NonExistent",

View File

@@ -4,6 +4,7 @@ import (
"context"
"fmt"
"os"
"strings"
"sync"
"github.com/danielmiessler/fabric/internal/domain"
@@ -107,18 +108,19 @@ func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, o
return "", fmt.Errorf("perplexity API request failed: %w", err) // Corrected capitalization
}
content := resp.GetLastContent()
var content strings.Builder
content.WriteString(resp.GetLastContent())
// Append citations if available
citations := resp.GetCitations()
if len(citations) > 0 {
content += "\n\n# CITATIONS\n\n"
content.WriteString("\n\n# CITATIONS\n\n")
for i, citation := range citations {
content += fmt.Sprintf("- [%d] %s\n", i+1, citation)
content.WriteString(fmt.Sprintf("- [%d] %s\n", i+1, citation))
}
}
return content, nil
return content.String(), nil
}
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *domain.ChatOptions, channel chan string) error {

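For clarity, a minimal standalone version of the citation formatting above; strings.Builder implements io.Writer, so fmt.Fprintf can write into it directly instead of concatenating strings.

package main

import (
	"fmt"
	"strings"
)

// withCitations shows the strings.Builder pattern adopted above: append the
// answer and an optional CITATIONS section without repeated string concatenation.
func withCitations(answer string, citations []string) string {
	var b strings.Builder
	b.WriteString(answer)
	if len(citations) > 0 {
		b.WriteString("\n\n# CITATIONS\n\n")
		for i, c := range citations {
			fmt.Fprintf(&b, "- [%d] %s\n", i+1, c)
		}
	}
	return b.String()
}

func main() {
	fmt.Print(withCitations("Answer text.", []string{"https://example.com/source"}))
}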
View File

@@ -25,9 +25,12 @@ type VendorsManager struct {
Models *VendorsModels
}
// AddVendors registers one or more vendors with the manager.
// Vendors are stored with lowercase keys to enable case-insensitive lookup.
func (o *VendorsManager) AddVendors(vendors ...Vendor) {
for _, vendor := range vendors {
o.VendorsByName[vendor.GetName()] = vendor
name := strings.ToLower(vendor.GetName())
o.VendorsByName[name] = vendor
o.Vendors = append(o.Vendors, vendor)
}
}
@@ -63,8 +66,10 @@ func (o *VendorsManager) HasVendors() bool {
return len(o.Vendors) > 0
}
// FindByName returns a vendor by name. Lookup is case-insensitive.
// For example, "OpenAI", "openai", and "OPENAI" all match the same vendor.
func (o *VendorsManager) FindByName(name string) Vendor {
return o.VendorsByName[name]
return o.VendorsByName[strings.ToLower(name)]
}
func (o *VendorsManager) readModels() (err error) {
@@ -143,9 +148,9 @@ func (o *VendorsManager) SetupVendor(vendorName string, configuredVendors map[st
func (o *VendorsManager) setupVendorTo(vendor Vendor, configuredVendors map[string]Vendor) {
if vendorErr := vendor.Setup(); vendorErr == nil {
fmt.Printf("[%v] configured\n", vendor.GetName())
configuredVendors[vendor.GetName()] = vendor
configuredVendors[strings.ToLower(vendor.GetName())] = vendor
} else {
delete(configuredVendors, vendor.GetName())
delete(configuredVendors, strings.ToLower(vendor.GetName()))
fmt.Printf("[%v] skipped\n", vendor.GetName())
}
}
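The vendor-lookup change above boils down to normalising map keys; a compact sketch of the idea:

package main

import (
	"fmt"
	"strings"
)

// registry stores entries under lowercased keys so lookups are case-insensitive:
// "OpenAI", "openai" and "OPENAI" all resolve to the same entry.
type registry struct {
	byName map[string]string
}

func (r *registry) add(name string) {
	r.byName[strings.ToLower(name)] = name
}

func (r *registry) find(name string) (string, bool) {
	v, ok := r.byName[strings.ToLower(name)]
	return v, ok
}

func main() {
	r := &registry{byName: map[string]string{}}
	r.add("OpenAI")
	fmt.Println(r.find("OPENAI")) // OpenAI true
}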

View File

@@ -0,0 +1,66 @@
package ai
import (
"bytes"
"context"
"testing"
"github.com/danielmiessler/fabric/internal/chat"
"github.com/danielmiessler/fabric/internal/domain"
)
type stubVendor struct {
name string
}
func (v *stubVendor) GetName() string { return v.name }
func (v *stubVendor) GetSetupDescription() string { return "" }
func (v *stubVendor) IsConfigured() bool { return true }
func (v *stubVendor) Configure() error { return nil }
func (v *stubVendor) Setup() error { return nil }
func (v *stubVendor) SetupFillEnvFileContent(*bytes.Buffer) {}
func (v *stubVendor) ListModels() ([]string, error) { return nil, nil }
func (v *stubVendor) SendStream([]*chat.ChatCompletionMessage, *domain.ChatOptions, chan string) error {
return nil
}
func (v *stubVendor) Send(context.Context, []*chat.ChatCompletionMessage, *domain.ChatOptions) (string, error) {
return "", nil
}
func (v *stubVendor) NeedsRawMode(string) bool { return false }
func TestVendorsManagerFindByNameCaseInsensitive(t *testing.T) {
manager := NewVendorsManager()
vendor := &stubVendor{name: "OpenAI"}
manager.AddVendors(vendor)
if got := manager.FindByName("openai"); got != vendor {
t.Fatalf("FindByName lowercase = %v, want %v", got, vendor)
}
if got := manager.FindByName("OPENAI"); got != vendor {
t.Fatalf("FindByName uppercase = %v, want %v", got, vendor)
}
if got := manager.FindByName("OpenAI"); got != vendor {
t.Fatalf("FindByName mixed case = %v, want %v", got, vendor)
}
}
func TestVendorsManagerSetupVendorToCaseInsensitive(t *testing.T) {
manager := NewVendorsManager()
vendor := &stubVendor{name: "OpenAI"}
configured := map[string]Vendor{}
manager.setupVendorTo(vendor, configured)
// Verify vendor is stored with lowercase key
if _, ok := configured["openai"]; !ok {
t.Fatalf("setupVendorTo should store vendor using lowercase key")
}
// Verify original case key is not used
if _, ok := configured["OpenAI"]; ok {
t.Fatalf("setupVendorTo should not store vendor using original case key")
}
}

View File

@@ -7,6 +7,7 @@ import (
"sort"
"strings"
"github.com/danielmiessler/fabric/internal/i18n"
"github.com/danielmiessler/fabric/internal/plugins/template"
"github.com/danielmiessler/fabric/internal/util"
)
@@ -128,7 +129,16 @@ func (o *PatternsEntity) getFromDB(name string) (ret *Pattern, err error) {
var pattern []byte
if pattern, err = os.ReadFile(patternPath); err != nil {
return
// Check if the patterns directory is empty to provide helpful error message
if os.IsNotExist(err) {
var entries []os.DirEntry
entries, _ = os.ReadDir(o.Dir)
if len(entries) == 0 || (len(entries) == 1 && entries[0].Name() == "loaded") {
// Patterns directory is empty or only has 'loaded' file
return nil, fmt.Errorf(i18n.T("pattern_not_found_no_patterns"), name)
}
}
return nil, fmt.Errorf(i18n.T("pattern_not_found_list_available"), name)
}
patternStr := string(pattern)

View File

@@ -134,7 +134,7 @@ func (o *StorageEntity) buildFileName(name string) string {
return fmt.Sprintf("%s%v", name, o.FileExtension)
}
func (o *StorageEntity) SaveAsJson(name string, item interface{}) (err error) {
func (o *StorageEntity) SaveAsJson(name string, item any) (err error) {
var jsonString []byte
if jsonString, err = json.Marshal(item); err == nil {
err = o.Save(name, jsonString)
@@ -145,7 +145,7 @@ func (o *StorageEntity) SaveAsJson(name string, item interface{}) (err error) {
return err
}
func (o *StorageEntity) LoadAsJson(name string, item interface{}) (err error) {
func (o *StorageEntity) LoadAsJson(name string, item any) (err error) {
var content []byte
if content, err = o.Load(name); err != nil {
return

View File

@@ -92,7 +92,11 @@ func (o *PluginBase) Setup() (err error) {
return
}
err = o.Configure()
// After Setup, run ConfigureCustom if present, but skip re-validation
// since Ask() already validated user input (or allowed explicit reset)
if o.ConfigureCustom != nil {
err = o.ConfigureCustom()
}
return
}
@@ -198,16 +202,21 @@ func (o *SetupQuestion) Ask(label string) (err error) {
var answer string
fmt.Scanln(&answer)
answer = strings.TrimRight(answer, "\n")
isReset := strings.ToLower(answer) == AnswerReset
if answer == "" {
answer = o.Value
} else if strings.ToLower(answer) == AnswerReset {
} else if isReset {
answer = ""
}
err = o.OnAnswer(answer)
err = o.OnAnswerWithReset(answer, isReset)
return
}
func (o *SetupQuestion) OnAnswer(answer string) (err error) {
return o.OnAnswerWithReset(answer, false)
}
func (o *SetupQuestion) OnAnswerWithReset(answer string, isReset bool) (err error) {
if o.Type == SettingTypeBool {
if answer == "" {
o.Value = ""
@@ -226,6 +235,11 @@ func (o *SetupQuestion) OnAnswer(answer string) (err error) {
return
}
}
// Skip validation when explicitly resetting a value - the user intentionally
// wants to clear the value even if it's required
if isReset {
return nil
}
err = o.IsValidErr()
return
}
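A condensed, hypothetical sketch of the reset semantics introduced above, with field and method names simplified: an explicit "reset" clears the value and skips the required-field validation, while an ordinary empty answer on a required field still fails.

package main

import (
	"errors"
	"fmt"
)

type setting struct {
	Value    string
	Required bool
}

// onAnswerWithReset is a simplified stand-in for OnAnswerWithReset above.
func (s *setting) onAnswerWithReset(answer string, isReset bool) error {
	s.Value = answer
	if isReset {
		return nil // user intentionally cleared the value; skip validation
	}
	if s.Required && s.Value == "" {
		return errors.New("value is required")
	}
	return nil
}

func main() {
	s := &setting{Value: "old", Required: true}
	fmt.Println(s.onAnswerWithReset("", true))  // <nil>  (reset allowed)
	fmt.Println(s.onAnswerWithReset("", false)) // value is required
}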

View File

@@ -116,6 +116,91 @@ func TestSetupQuestion_Ask(t *testing.T) {
assert.Equal(t, "user_value", setting.Value)
}
func TestSetupQuestion_Ask_Reset(t *testing.T) {
// Test that resetting a required field doesn't produce an error
setting := &Setting{
EnvVariable: "TEST_RESET_SETTING",
Value: "existing_value",
Required: true,
}
question := &SetupQuestion{
Setting: setting,
Question: "Enter test setting:",
}
input := "reset\n"
fmtInput := captureInput(input)
defer fmtInput()
err := question.Ask("TestConfigurable")
// Should NOT return an error even though the field is required
assert.NoError(t, err)
// Value should be cleared
assert.Equal(t, "", setting.Value)
}
func TestSetupQuestion_OnAnswerWithReset(t *testing.T) {
tests := []struct {
name string
setting *Setting
answer string
isReset bool
expectError bool
expectValue string
}{
{
name: "reset required field should not error",
setting: &Setting{
EnvVariable: "TEST_SETTING",
Value: "old_value",
Required: true,
},
answer: "",
isReset: true,
expectError: false,
expectValue: "",
},
{
name: "empty answer on required field should error",
setting: &Setting{
EnvVariable: "TEST_SETTING",
Value: "",
Required: true,
},
answer: "",
isReset: false,
expectError: true,
expectValue: "",
},
{
name: "valid answer on required field should not error",
setting: &Setting{
EnvVariable: "TEST_SETTING",
Value: "",
Required: true,
},
answer: "new_value",
isReset: false,
expectError: false,
expectValue: "new_value",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
question := &SetupQuestion{
Setting: tt.setting,
Question: "Test question",
}
err := question.OnAnswerWithReset(tt.answer, tt.isReset)
if tt.expectError {
assert.Error(t, err)
} else {
assert.NoError(t, err)
}
assert.Equal(t, tt.expectValue, tt.setting.Value)
})
}
}
func TestSettings_IsConfigured(t *testing.T) {
settings := Settings{
{EnvVariable: "TEST_SETTING1", Value: "value1", Required: true},

View File

@@ -103,17 +103,20 @@ func (sm *StrategiesManager) Setup() (err error) {
if err = sm.PopulateDB(); err != nil {
return
}
// Reload strategies after downloading so IsConfigured() reflects the new state
sm.Strategies, _ = LoadAllFiles()
return
}
// PopulateDB downloads strategies from the internet and populates the strategies folder
func (sm *StrategiesManager) PopulateDB() (err error) {
stageDir, _ := getStrategyDir()
fmt.Printf("Downloading strategies and Populating %s...\n", stageDir)
strategyDir, _ := getStrategyDir()
fmt.Printf("Downloading strategies and Populating %s...\n", strategyDir)
fmt.Println()
if err = sm.gitCloneAndCopy(); err != nil {
return
}
fmt.Printf("✅ Successfully downloaded and installed strategies to %s\n", strategyDir)
return
}
@@ -130,6 +133,8 @@ func (sm *StrategiesManager) gitCloneAndCopy() (err error) {
return fmt.Errorf("failed to create strategies directory: %w", err)
}
fmt.Printf("Cloning repository %s (path: %s)...\n", sm.DefaultGitRepoUrl.Value, sm.DefaultFolder.Value)
// Use the helper to fetch files
err = githelper.FetchFilesFromRepo(githelper.FetchOptions{
RepoURL: sm.DefaultGitRepoUrl.Value,
@@ -141,6 +146,18 @@ func (sm *StrategiesManager) gitCloneAndCopy() (err error) {
return fmt.Errorf("failed to download strategies: %w", err)
}
// Count downloaded strategies
entries, readErr := os.ReadDir(strategyDir)
if readErr == nil {
strategyCount := 0
for _, entry := range entries {
if !entry.IsDir() && filepath.Ext(entry.Name()) == ".json" {
strategyCount++
}
}
fmt.Printf("Downloaded %d strategies\n", strategyCount)
}
return nil
}
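The download counter added above is just an os.ReadDir scan; shown standalone for reference:

package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// countJSONFiles counts the ".json" entries directly inside dir,
// mirroring the strategy-count loop above (sub-directories are skipped).
func countJSONFiles(dir string) (int, error) {
	entries, err := os.ReadDir(dir)
	if err != nil {
		return 0, err
	}
	count := 0
	for _, e := range entries {
		if !e.IsDir() && filepath.Ext(e.Name()) == ".json" {
			count++
		}
	}
	return count, nil
}

func main() {
	n, err := countJSONFiles(".")
	fmt.Println(n, err)
}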

View File

@@ -187,9 +187,10 @@ esac`
executor := NewExtensionExecutor(registry)
// Helper function to create and register extension
createExtension := func(name, opName, cmdTemplate string, config map[string]interface{}) error {
createExtension := func(name, opName, cmdTemplate string, config map[string]any) error {
configPath := filepath.Join(tmpDir, name+".yaml")
configContent := `name: ` + name + `
var configContent strings.Builder
configContent.WriteString(`name: ` + name + `
executable: ` + testScript + `
type: executable
timeout: 30s
@@ -199,14 +200,14 @@ operations:
config:
output:
method: file
file_config:`
file_config:`)
// Add config options
for k, v := range config {
configContent += "\n " + k + ": " + strings.TrimSpace(v.(string))
configContent.WriteString("\n " + k + ": " + strings.TrimSpace(v.(string)))
}
if err := os.WriteFile(configPath, []byte(configContent), 0644); err != nil {
if err := os.WriteFile(configPath, []byte(configContent.String()), 0644); err != nil {
return err
}
@@ -216,7 +217,7 @@ config:
// Test basic fixed file output
t.Run("BasicFixedFile", func(t *testing.T) {
outputFile := filepath.Join(tmpDir, "output.txt")
config := map[string]interface{}{
config := map[string]any{
"output_file": `"output.txt"`,
"work_dir": `"` + tmpDir + `"`,
"cleanup": "true",
@@ -241,7 +242,7 @@ config:
// Test no work_dir specified
t.Run("NoWorkDir", func(t *testing.T) {
config := map[string]interface{}{
config := map[string]any{
"output_file": `"direct-output.txt"`,
"cleanup": "true",
}
@@ -263,7 +264,7 @@ config:
outputFile := filepath.Join(tmpDir, "cleanup-test.txt")
// Test with cleanup enabled
config := map[string]interface{}{
config := map[string]any{
"output_file": `"cleanup-test.txt"`,
"work_dir": `"` + tmpDir + `"`,
"cleanup": "true",
@@ -307,7 +308,7 @@ config:
// Test error cases
t.Run("ErrorCases", func(t *testing.T) {
outputFile := filepath.Join(tmpDir, "error-test.txt")
config := map[string]interface{}{
config := map[string]any{
"output_file": `"error-test.txt"`,
"work_dir": `"` + tmpDir + `"`,
"cleanup": "true",
@@ -341,7 +342,7 @@ config:
// Test with missing output_file
t.Run("MissingOutputFile", func(t *testing.T) {
config := map[string]interface{}{
config := map[string]any{
"work_dir": `"` + tmpDir + `"`,
"cleanup": "true",
}

View File

@@ -30,7 +30,7 @@ type ExtensionDefinition struct {
Operations map[string]OperationConfig `yaml:"operations"`
// Additional config
Config map[string]interface{} `yaml:"config"`
Config map[string]any `yaml:"config"`
}
type OperationConfig struct {
@@ -53,7 +53,7 @@ type ExtensionRegistry struct {
// Helper methods for Config access
func (e *ExtensionDefinition) GetOutputMethod() string {
if output, ok := e.Config["output"].(map[string]interface{}); ok {
if output, ok := e.Config["output"].(map[string]any); ok {
if method, ok := output["method"].(string); ok {
return method
}
@@ -61,9 +61,9 @@ func (e *ExtensionDefinition) GetOutputMethod() string {
return "stdout" // default to stdout if not specified
}
func (e *ExtensionDefinition) GetFileConfig() map[string]interface{} {
if output, ok := e.Config["output"].(map[string]interface{}); ok {
if fileConfig, ok := output["file_config"].(map[string]interface{}); ok {
func (e *ExtensionDefinition) GetFileConfig() map[string]any {
if output, ok := e.Config["output"].(map[string]any); ok {
if fileConfig, ok := output["file_config"].(map[string]any); ok {
return fileConfig
}
}
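The interface{} to any change is purely cosmetic; for readers unfamiliar with the pattern, here is the nested type-assertion idiom these helpers rely on, as a standalone sketch:

package main

import "fmt"

// outputMethod descends a YAML-decoded map[string]any one asserted level at a
// time, defaulting to "stdout" when any level is missing or mistyped.
func outputMethod(config map[string]any) string {
	if output, ok := config["output"].(map[string]any); ok {
		if method, ok := output["method"].(string); ok {
			return method
		}
	}
	return "stdout"
}

func main() {
	cfg := map[string]any{"output": map[string]any{"method": "file"}}
	fmt.Println(outputMethod(cfg))              // file
	fmt.Println(outputMethod(map[string]any{})) // stdout
}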

View File

@@ -33,7 +33,7 @@ func init() {
var pluginPattern = regexp.MustCompile(`\{\{plugin:([^:]+):([^:]+)(?::([^}]+))?\}\}`)
var extensionPattern = regexp.MustCompile(`\{\{ext:([^:]+):([^:]+)(?::([^}]+))?\}\}`)
func debugf(format string, a ...interface{}) {
func debugf(format string, a ...any) {
debuglog.Debug(debuglog.Trace, format, a...)
}

View File

@@ -16,7 +16,7 @@ func toTitle(s string) string {
lower := strings.ToLower(s)
runes := []rune(lower)
for i := 0; i < len(runes); i++ {
for i := range runes {
// Capitalize if previous char is non-letter AND
// (we're at the end OR next char is not space)
if i == 0 || !unicode.IsLetter(runes[i-1]) {

View File

@@ -2,14 +2,25 @@ package restapi
import (
"net/http"
"strings"
"github.com/gin-gonic/gin"
)
const APIKeyHeader = "X-API-Key"
// APIKeyMiddleware validates API key for protected endpoints.
// Swagger documentation endpoints (/swagger/*) are exempt from authentication
// to allow users to browse and test the API documentation freely.
func APIKeyMiddleware(apiKey string) gin.HandlerFunc {
return func(c *gin.Context) {
// Skip authentication for Swagger documentation endpoints
// This allows public access to API docs even when authentication is enabled
if strings.HasPrefix(c.Request.URL.Path, "/swagger/") {
c.Next()
return
}
headerApiKey := c.GetHeader(APIKeyHeader)
if headerApiKey == "" {

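From a client's perspective, the middleware above means every route except /swagger/* expects the X-API-Key header when a key is configured. A minimal example call follows; the base URL, route, and key value are illustrative assumptions.

package main

import (
	"fmt"
	"net/http"
)

func main() {
	// Protected route: the X-API-Key header is required when the server
	// was started with an API key configured.
	req, err := http.NewRequest(http.MethodGet, "http://localhost:8080/models/names", nil)
	if err != nil {
		panic(err)
	}
	req.Header.Set("X-API-Key", "your-api-key") // placeholder value

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()
	fmt.Println(resp.Status)

	// A request to /swagger/index.html would succeed without the header,
	// because the middleware above skips paths prefixed with /swagger/.
}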
View File

@@ -55,6 +55,17 @@ func NewChatHandler(r *gin.Engine, registry *core.PluginRegistry, db *fsdb.Db) *
return handler
}
// HandleChat godoc
// @Summary Stream chat completions
// @Description Stream AI responses using Server-Sent Events (SSE)
// @Tags chat
// @Accept json
// @Produce text/event-stream
// @Param request body ChatRequest true "Chat request with prompts and options"
// @Success 200 {object} StreamResponse "Streaming response"
// @Failure 400 {object} map[string]string
// @Security ApiKeyAuth
// @Router /chat [post]
func (h *ChatHandler) HandleChat(c *gin.Context) {
var request ChatRequest

View File

@@ -17,6 +17,15 @@ func NewModelsHandler(r *gin.Engine, vendorManager *ai.VendorsManager) {
r.GET("/models/names", handler.GetModelNames)
}
// GetModelNames godoc
// @Summary List all available models
// @Description Get a list of all available AI models grouped by vendor
// @Tags models
// @Produce json
// @Success 200 {object} map[string]interface{} "Returns models (array) and vendors (map)"
// @Failure 500 {object} map[string]string
// @Security ApiKeyAuth
// @Router /models/names [get]
func (h *ModelsHandler) GetModelNames(c *gin.Context) {
vendorsModels, err := h.vendorManager.GetModels()
if err != nil {
@@ -24,7 +33,7 @@ func (h *ModelsHandler) GetModelNames(c *gin.Context) {
return
}
response := make(map[string]interface{})
response := make(map[string]any)
vendors := make(map[string][]string)
for _, groupItems := range vendorsModels.GroupsItems {

View File

@@ -102,7 +102,7 @@ func ServeOllama(registry *core.PluginRegistry, address string, version string)
// Ollama Endpoints
r.GET("/api/tags", typeConversion.ollamaTags)
r.GET("/api/version", func(c *gin.Context) {
c.Data(200, "application/json", []byte(fmt.Sprintf("{\"%s\"}", version)))
c.Data(200, "application/json", fmt.Appendf(nil, "{\"%s\"}", version))
})
r.POST("/api/chat", typeConversion.ollamaChat)
@@ -224,7 +224,7 @@ func (f APIConvert) ollamaChat(c *gin.Context) {
c.JSON(http.StatusInternalServerError, gin.H{"error": "testing endpoint"})
return
}
for _, word := range strings.Split(fabricResponse.Content, " ") {
for word := range strings.SplitSeq(fabricResponse.Content, " ") {
forwardedResponse = OllamaResponse{
Model: "",
CreatedAt: "",

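Two newer stdlib helpers appear in this hunk (fmt.Appendf, Go 1.19+; strings.SplitSeq, Go 1.24+); a quick standalone illustration of both:

package main

import (
	"fmt"
	"strings"
)

func main() {
	// strings.SplitSeq yields fields lazily instead of allocating a slice,
	// which is what the streaming loop above switches to.
	for word := range strings.SplitSeq("stream these words", " ") {
		fmt.Println(word)
	}

	// fmt.Appendf formats straight into a byte slice, avoiding the
	// intermediate string from fmt.Sprintf.
	payload := fmt.Appendf(nil, "{\"%s\"}", "v1.4.354")
	fmt.Println(string(payload)) // {"v1.4.354"}
}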
View File

@@ -1,6 +1,7 @@
package restapi
import (
"maps"
"net/http"
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
@@ -32,6 +33,16 @@ func NewPatternsHandler(r *gin.Engine, patterns *fsdb.PatternsEntity) (ret *Patt
}
// Get handles the GET /patterns/:name route - returns raw pattern without variable processing
// @Summary Get a pattern
// @Description Retrieve a pattern by name
// @Tags patterns
// @Accept json
// @Produce json
// @Param name path string true "Pattern name"
// @Success 200 {object} fsdb.Pattern
// @Failure 500 {object} map[string]string
// @Security ApiKeyAuth
// @Router /patterns/{name} [get]
func (h *PatternsHandler) Get(c *gin.Context) {
name := c.Param("name")
@@ -58,6 +69,18 @@ type PatternApplyRequest struct {
}
// ApplyPattern handles the POST /patterns/:name/apply route
// @Summary Apply pattern with variables
// @Description Apply a pattern with variable substitution
// @Tags patterns
// @Accept json
// @Produce json
// @Param name path string true "Pattern name"
// @Param request body PatternApplyRequest true "Pattern application request"
// @Success 200 {object} fsdb.Pattern
// @Failure 400 {object} map[string]string
// @Failure 500 {object} map[string]string
// @Security ApiKeyAuth
// @Router /patterns/{name}/apply [post]
func (h *PatternsHandler) ApplyPattern(c *gin.Context) {
name := c.Param("name")
@@ -74,9 +97,7 @@ func (h *PatternsHandler) ApplyPattern(c *gin.Context) {
variables[key] = values[0]
}
}
for key, value := range request.Variables {
variables[key] = value
}
maps.Copy(variables, request.Variables)
pattern, err := h.patterns.GetApplyVariables(name, variables, request.Input)
if err != nil {

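maps.Copy (Go 1.21+) replaces the manual range-and-assign loop removed above; as before, body variables overwrite query-string variables on key collision.

package main

import (
	"fmt"
	"maps"
)

func main() {
	variables := map[string]string{"lang": "en"}                  // e.g. from query parameters
	fromBody := map[string]string{"lang": "fr", "tone": "formal"} // e.g. request.Variables
	maps.Copy(variables, fromBody) // copies src into dst, overwriting existing keys
	fmt.Println(variables)         // map[lang:fr tone:formal]
}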
View File

@@ -2,11 +2,30 @@ package restapi
import (
"log/slog"
"net/http"
"os"
"path/filepath"
"github.com/danielmiessler/fabric/internal/core"
"github.com/gin-gonic/gin"
swaggerFiles "github.com/swaggo/files"
ginSwagger "github.com/swaggo/gin-swagger"
_ "github.com/danielmiessler/fabric/docs" // swagger docs
)
// @title Fabric REST API
// @version 1.0
// @description REST API for Fabric AI augmentation framework. Provides endpoints for chat completions, pattern management, contexts, sessions, and more.
// @contact.name Fabric Support
// @contact.url https://github.com/danielmiessler/fabric
// @license.name MIT
// @license.url https://opensource.org/licenses/MIT
// @host localhost:8080
// @BasePath /
// @securityDefinitions.apikey ApiKeyAuth
// @in header
// @name X-API-Key
func Serve(registry *core.PluginRegistry, address string, apiKey string) (err error) {
r := gin.New()
@@ -20,6 +39,32 @@ func Serve(registry *core.PluginRegistry, address string, apiKey string) (err er
slog.Warn("Starting REST API server without API key authentication. This may pose security risks.")
}
// Swagger UI and documentation endpoint with custom YAML handler
r.GET("/swagger/*any", func(c *gin.Context) {
// Check if request is for swagger.yaml
if c.Param("any") == "/swagger.yaml" {
// Try to find swagger.yaml relative to current directory or executable
yamlPath := "docs/swagger.yaml"
if _, err := os.Stat(yamlPath); os.IsNotExist(err) {
// Try relative to executable
if exePath, err := os.Executable(); err == nil {
yamlPath = filepath.Join(filepath.Dir(exePath), "docs", "swagger.yaml")
}
}
if _, err := os.Stat(yamlPath); err != nil {
c.JSON(http.StatusNotFound, gin.H{"error": "swagger.yaml not found - generate it with: swag init -g internal/server/serve.go -o docs"})
return
}
c.File(yamlPath)
return
}
// For all other swagger paths, use the default handler
ginSwagger.WrapHandler(swaggerFiles.Handler)(c)
})
// Register routes
fabricDb := registry.Db
NewPatternsHandler(r, fabricDb.Patterns)

View File

@@ -12,15 +12,19 @@ type YouTubeHandler struct {
yt *youtube.YouTube
}
// YouTubeRequest represents a request to get a YouTube video transcript
type YouTubeRequest struct {
URL string `json:"url"`
Language string `json:"language"`
Timestamps bool `json:"timestamps"`
URL string `json:"url" binding:"required" example:"https://www.youtube.com/watch?v=dQw4w9WgXcQ"` // YouTube video URL (required)
Language string `json:"language,omitempty" example:"en"` // Language code for transcript (default: "en")
Timestamps bool `json:"timestamps,omitempty" example:"false"` // Include timestamps in the transcript (default: false)
}
// YouTubeResponse represents the response containing video transcript and metadata
type YouTubeResponse struct {
Transcript string `json:"transcript"`
Title string `json:"title"`
Transcript string `json:"transcript" example:"This is the video transcript..."` // The video transcript text
VideoId string `json:"videoId" example:"dQw4w9WgXcQ"` // YouTube video ID
Title string `json:"title" example:"Example Video Title"` // Video title from YouTube metadata
Description string `json:"description" example:"This is the video description from YouTube..."` // Video description from YouTube metadata
}
func NewYouTubeHandler(r *gin.Engine, registry *core.PluginRegistry) *YouTubeHandler {
@@ -29,6 +33,18 @@ func NewYouTubeHandler(r *gin.Engine, registry *core.PluginRegistry) *YouTubeHan
return handler
}
// Transcript godoc
// @Summary Get YouTube video transcript
// @Description Retrieves the transcript of a YouTube video along with video metadata (title and description)
// @Tags youtube
// @Accept json
// @Produce json
// @Param request body YouTubeRequest true "YouTube transcript request with URL, language, and timestamp options"
// @Success 200 {object} YouTubeResponse "Successful response with transcript and metadata"
// @Failure 400 {object} map[string]string "Bad request - invalid URL or playlist URL provided"
// @Failure 500 {object} map[string]string "Internal server error - failed to retrieve transcript or metadata"
// @Security ApiKeyAuth
// @Router /youtube/transcript [post]
func (h *YouTubeHandler) Transcript(c *gin.Context) {
var req YouTubeRequest
if err := c.BindJSON(&req); err != nil {
@@ -55,6 +71,20 @@ func (h *YouTubeHandler) Transcript(c *gin.Context) {
return
}
// Try to get metadata (requires valid YouTube API key), but don't fail if unavailable
// This allows the endpoint to work for transcript extraction even without API key
var metadata *youtube.VideoMetadata
var title, description string
if metadata, err = h.yt.GrabMetadata(videoID); err == nil {
// Metadata available - use title and description from API
title = metadata.Title
description = metadata.Description
} else {
// No valid API key or metadata fetch failed - fallback to videoID as title
title = videoID
description = ""
}
var transcript string
if req.Timestamps {
transcript, err = h.yt.GrabTranscriptWithTimestamps(videoID, language)
@@ -66,5 +96,5 @@ func (h *YouTubeHandler) Transcript(c *gin.Context) {
return
}
c.JSON(http.StatusOK, YouTubeResponse{Transcript: transcript, Title: videoID})
c.JSON(http.StatusOK, YouTubeResponse{Transcript: transcript, VideoId: videoID, Title: title, Description: description})
}
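Putting the new request and response shapes together, here is an example client call against /youtube/transcript; the base URL and API key are illustrative assumptions.

package main

import (
	"bytes"
	"encoding/json"
	"fmt"
	"net/http"
)

func main() {
	body, _ := json.Marshal(map[string]any{
		"url":        "https://www.youtube.com/watch?v=dQw4w9WgXcQ",
		"language":   "en",
		"timestamps": false,
	})
	req, err := http.NewRequest(http.MethodPost,
		"http://localhost:8080/youtube/transcript", bytes.NewReader(body))
	if err != nil {
		panic(err)
	}
	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Key", "your-api-key") // placeholder

	resp, err := http.DefaultClient.Do(req)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	var out struct {
		Transcript  string `json:"transcript"`
		VideoId     string `json:"videoId"`
		Title       string `json:"title"`
		Description string `json:"description"`
	}
	if err := json.NewDecoder(resp.Body).Decode(&out); err != nil {
		panic(err)
	}
	fmt.Println(out.Title)
}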

View File

@@ -133,7 +133,7 @@ func (o *GroupsItemsSelector[I]) Print(shellCompleteList bool) {
func (o *GroupsItemsSelector[I]) HasGroup(group string) (ret bool) {
for _, groupItems := range o.GroupsItems {
if ret = groupItems.Group == group; ret {
if ret = strings.EqualFold(groupItems.Group, group); ret {
break
}
}
@@ -146,7 +146,7 @@ func (o *GroupsItemsSelector[I]) FindGroupsByItemFirst(item I) (ret string) {
for _, groupItems := range o.GroupsItems {
if groupItems.ContainsItemBy(func(groupItem I) bool {
groupItemKey := o.GetItemKey(groupItem)
return groupItemKey == itemKey
return strings.EqualFold(groupItemKey, itemKey)
}) {
ret = groupItems.Group
break
@@ -161,7 +161,7 @@ func (o *GroupsItemsSelector[I]) FindGroupsByItem(item I) (groups []string) {
for _, groupItems := range o.GroupsItems {
if groupItems.ContainsItemBy(func(groupItem I) bool {
groupItemKey := o.GetItemKey(groupItem)
return groupItemKey == itemKey
return strings.EqualFold(groupItemKey, itemKey)
}) {
groups = append(groups, groupItems.Group)
}

View File

@@ -22,6 +22,9 @@ schema = 3
[mod."github.com/Azure/azure-sdk-for-go/sdk/internal"]
version = "v1.11.2"
hash = "sha256-O4Vo6D/fus3Qhs/Te644+jh2LfiG5PpiMkW0YWIbLCs="
[mod."github.com/KyleBanks/depth"]
version = "v1.2.1"
hash = "sha256-czR52MfeKA2FdStXCebTMQRKT8jaWQcbV214O3j49qU="
[mod."github.com/Microsoft/go-winio"]
version = "v0.6.2"
hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
@@ -32,8 +35,8 @@ schema = 3
version = "v1.3.3"
hash = "sha256-jv7ZshpSd7FZzKKN6hqlUgiR8C3y85zNIS/hq7g76Ho="
[mod."github.com/anthropics/anthropic-sdk-go"]
version = "v1.16.0"
hash = "sha256-hD6Ix+V5IBFfoaCuAZemrDQx/+G111fCYHn2FAxFuEE="
version = "v1.19.0"
hash = "sha256-ubYeau5XL0tx4c/79L58rzJGOdOWs9z6WQOtN6mpgxw="
[mod."github.com/araddon/dateparse"]
version = "v0.0.0-20210429162001-6b43995a97de"
hash = "sha256-UuX84naeRGMsFOgIgRoBHG5sNy1CzBkWPKmd6VbLwFw="
@@ -88,18 +91,21 @@ schema = 3
[mod."github.com/aws/smithy-go"]
version = "v1.23.0"
hash = "sha256-75k+gn1lbQB1TzjV3HeEJeuyPPfX2huKhONXo98SUKg="
[mod."github.com/bytedance/gopkg"]
version = "v0.1.3"
hash = "sha256-GyUbPfn41y/mgj0cQOa4tm+aj70C2K50VBZxZc/tcZE="
[mod."github.com/bytedance/sonic"]
version = "v1.13.3"
hash = "sha256-Nnt5b2NkIvSXhGERQmyI0ka28hbWi7A7Zn3dsAjPcEA="
version = "v1.14.2"
hash = "sha256-S6EGwzt3TaTUjU1SdtorTdAq3xwROzSGZT6ynfLNq8o="
[mod."github.com/bytedance/sonic/loader"]
version = "v0.2.4"
hash = "sha256-rv9LnePpm4OspSVbfSoVbohXzhu+dxE1BH1gm3mTmTc="
version = "v0.4.0"
hash = "sha256-Hc2bB9nLEFhyipIVHKnJmi6WMoWPCe0REK3bmQThO4A="
[mod."github.com/cloudflare/circl"]
version = "v1.6.1"
hash = "sha256-Dc69V12eIFnJoUNmwg6VKXHfAMijbAeEVSDe8AiOaLo="
[mod."github.com/cloudwego/base64x"]
version = "v0.1.5"
hash = "sha256-MyUYTveN48DhnL8mwAgCRuMExLct98uzSPsmYlfaa4I="
version = "v0.1.6"
hash = "sha256-VzYJsGubsDk3FAMH6e0Xk0Cl4HtUFHHmWkVASUYidsI="
[mod."github.com/coder/websocket"]
version = "v1.8.13"
hash = "sha256-NbF0aPhy8YR3jRM6LMMQTtkeGTFba0eIBPAUsqI9KOk="
@@ -116,14 +122,14 @@ schema = 3
version = "v1.0.4"
hash = "sha256-c1JKoRSndwwOyOxq9ddCe+8qn7mG9uRq2o/822x5O/c="
[mod."github.com/gabriel-vasile/mimetype"]
version = "v1.4.9"
hash = "sha256-75uELLqb01djHTe7KdXvUidBK7SuejarYouEUuxaj8Q="
version = "v1.4.12"
hash = "sha256-vY2g58yUrkT//8fttRKhS9rbg89YSae/BzOARS5uH30="
[mod."github.com/gin-contrib/sse"]
version = "v1.1.0"
hash = "sha256-2VP6zHEsPi0u2ZYpOTcLulwj1Gsmb6oA19qcP2/AzVM="
[mod."github.com/gin-gonic/gin"]
version = "v1.10.1"
hash = "sha256-D98+chAdjb6JcLPkscOr8TgTW87UqA4h3cnY0XIr16c="
version = "v1.11.0"
hash = "sha256-tFP0u7TZyB1V8b1mdnTWMrfdcwzf9yO86RZr026K8Ao="
[mod."github.com/go-git/gcfg"]
version = "v1.5.1-0.20230307220236-3a3c6141e376"
hash = "sha256-f4k0gSYuo0/q3WOoTxl2eFaj7WZpdz29ih6CKc8Ude8="
@@ -139,6 +145,36 @@ schema = 3
[mod."github.com/go-logr/stdr"]
version = "v1.2.2"
hash = "sha256-rRweAP7XIb4egtT1f2gkz4sYOu7LDHmcJ5iNsJUd0sE="
[mod."github.com/go-openapi/jsonpointer"]
version = "v0.22.4"
hash = "sha256-V76k+oQUyqkWcpa64bOirteoCfdn1Xm9TLH0/7W4Uxc="
[mod."github.com/go-openapi/jsonreference"]
version = "v0.21.4"
hash = "sha256-R+Q/MOSRTMiOaZutRmjv/qwbU9/cUWN59Hwr2LgDz2U="
[mod."github.com/go-openapi/spec"]
version = "v0.22.2"
hash = "sha256-o9A6ZVaVPUVP2ssO0I4CDTuGyRuhL/VCM28jU0l32ps="
[mod."github.com/go-openapi/swag/conv"]
version = "v0.25.4"
hash = "sha256-uHgTdZC76LMSsq+x+RyeclnOgBsS0ID2cLgfjzFk0KQ="
[mod."github.com/go-openapi/swag/jsonname"]
version = "v0.25.4"
hash = "sha256-Z6uETeudh8W+/SGxFBnOB/VlJeRPRkFgCyJmntpz7bc="
[mod."github.com/go-openapi/swag/jsonutils"]
version = "v0.25.4"
hash = "sha256-TQGeMImUuL5BWDoBHKAjZ1BvvxPXkFrV3TMgb82IrLk="
[mod."github.com/go-openapi/swag/loading"]
version = "v0.25.4"
hash = "sha256-xr8OnFqB/kwBj3yf9HiLJwDcpL7I3/qJYlKb6VWbVFA="
[mod."github.com/go-openapi/swag/stringutils"]
version = "v0.25.4"
hash = "sha256-rVo5NBH+oLLX9kaemUYWILd+TKw/TDTB8UdSz+mn3m8="
[mod."github.com/go-openapi/swag/typeutils"]
version = "v0.25.4"
hash = "sha256-59RHnK6ugsAUc+A8DZCj7gZ3bJnt01Al7T/1kzM9PpA="
[mod."github.com/go-openapi/swag/yamlutils"]
version = "v0.25.4"
hash = "sha256-zba7QX7Ds05oZq1opP/vwRSBEDaJ1pKm+9DgrsOuR9w="
[mod."github.com/go-playground/locales"]
version = "v0.14.1"
hash = "sha256-BMJGAexq96waZn60DJXZfByRHb8zA/JP/i6f/YrW9oQ="
@@ -146,8 +182,8 @@ schema = 3
version = "v0.18.1"
hash = "sha256-2/B2qP51zfiY+k8G0w0D03KXUc7XpWj6wKY7NjNP/9E="
[mod."github.com/go-playground/validator/v10"]
version = "v10.26.0"
hash = "sha256-/jMKICp8LTcJVt+b4YRTnJM84r7HK6aT0oqO7Q8SRs8="
version = "v10.29.0"
hash = "sha256-HFiWBwxNlzoyMlRwA92rAiEGjBy9Dz/ZUKSRp8pXB2Y="
[mod."github.com/go-shiori/dom"]
version = "v0.0.0-20230515143342-73569d674e1c"
hash = "sha256-4lm9KZfR2XnfZU9KTG+4jqLYZqbfL74AMO4y3dKpIbg="
@@ -157,6 +193,9 @@ schema = 3
[mod."github.com/goccy/go-json"]
version = "v0.10.5"
hash = "sha256-/EtlGihP0/7oInzMC5E0InZ4b5Ad3s4xOpqotloi3xw="
[mod."github.com/goccy/go-yaml"]
version = "v1.19.1"
hash = "sha256-zlT9y4wkvyv7RBv48jdJqOM2lrDMLrC6Z6hCl90c5hQ="
[mod."github.com/gogs/chardet"]
version = "v0.0.0-20211120154057-b7413eaefb8f"
hash = "sha256-4MeqBJsh4U+ZEbfdDwdciTYMlQWkCil2KJbUxHjBSIo="
@@ -212,8 +251,8 @@ schema = 3
version = "v1.2.0"
hash = "sha256-Ta7ZOmyX8gG5tzWbY2oES70EJPfI90U7CIJS9EAce0s="
[mod."github.com/klauspost/cpuid/v2"]
version = "v2.2.10"
hash = "sha256-o21Tk5sD7WhhLUoqSkymnjLbzxl0mDJCTC1ApfZJrC0="
version = "v2.3.0"
hash = "sha256-50JhbQyT67BK38HIdJihPtjV7orYp96HknI2VP7A9Yc="
[mod."github.com/leodido/go-urn"]
version = "v1.4.0"
hash = "sha256-Q6kplWkY37Tzy6GOme3Wut40jFK4Izun+ij/BJvcEu0="
@@ -256,6 +295,12 @@ schema = 3
[mod."github.com/pmezard/go-difflib"]
version = "v1.0.1-0.20181226105442-5d4384ee4fb2"
hash = "sha256-XA4Oj1gdmdV/F/+8kMI+DBxKPthZ768hbKsO3d9Gx90="
[mod."github.com/quic-go/qpack"]
version = "v0.6.0"
hash = "sha256-xaxHnTKIZt1cHK5ZqTuSTOt5RNSjQB37GlrIgEGBskM="
[mod."github.com/quic-go/quic-go"]
version = "v0.57.1"
hash = "sha256-MdXc0GRVp3YuN9XFoGFOjgIcIMp7yoLqwfEikZp1i4w="
[mod."github.com/samber/lo"]
version = "v1.50.0"
hash = "sha256-KDFks82BKu39sGt0f972IyOkohV2U0r1YvsnlNLdugY="
@@ -277,6 +322,15 @@ schema = 3
[mod."github.com/stretchr/testify"]
version = "v1.11.1"
hash = "sha256-sWfjkuKJyDllDEtnM8sb/pdLzPQmUYWYtmeWz/5suUc="
[mod."github.com/swaggo/files"]
version = "v1.0.1"
hash = "sha256-bNBmpJaM7g1BNwd7VxNIRSdY35NKSXhYHGfnZsSEUZ8="
[mod."github.com/swaggo/gin-swagger"]
version = "v1.6.1"
hash = "sha256-9TQuEmPpDO2QL0c9VwjK9uwB1jka08Ox6fje2RwVpUc="
[mod."github.com/swaggo/swag"]
version = "v1.16.6"
hash = "sha256-na5+ZylxPFKoMJcgx8nmSJXMMkKFCyuh1qSkb7Qth/I="
[mod."github.com/tidwall/gjson"]
version = "v1.18.0"
hash = "sha256-CO6hqDu8Y58Po6A01e5iTpwiUBQ5khUZsw7czaJHw0I="
@@ -293,8 +347,8 @@ schema = 3
version = "v0.15.1"
hash = "sha256-HLk6oUe7EoITrNvP0y8D6BtIgIcmDZYtb/xl/dufIoY="
[mod."github.com/ugorji/go/codec"]
version = "v1.2.14"
hash = "sha256-PoVXlCBE8SvMWpXx9FRsQOSAmE/+5SnPGr4m5BGoyIo="
version = "v1.3.1"
hash = "sha256-VQtXVaKxXjm5Q60hCgVKZxNywl6SJFPqju6JNjADp4w="
[mod."github.com/xanzy/ssh-agent"]
version = "v0.3.3"
hash = "sha256-l3pGB6IdzcPA/HLk93sSN6NM2pKPy+bVOoacR5RC2+c="
@@ -313,30 +367,42 @@ schema = 3
[mod."go.opentelemetry.io/otel/trace"]
version = "v1.36.0"
hash = "sha256-owWD9x1lp8aIJqYt058BXPUsIMHdk3RI0escso0BxwA="
[mod."go.uber.org/mock"]
version = "v0.6.0"
hash = "sha256-m11cxIbrvOowa6xj11AztzfFk86DwR6SNO1lStcKzvo="
[mod."go.yaml.in/yaml/v3"]
version = "v3.0.4"
hash = "sha256-NkGFiDPoCxbr3LFsI6OCygjjkY0rdmg5ggvVVwpyDQ4="
[mod."golang.org/x/arch"]
version = "v0.18.0"
hash = "sha256-tUpUPERjmRi7zldj0oPlnbnBhEkcI9iQGvP1HqlsK10="
version = "v0.23.0"
hash = "sha256-ynvhsw8ZYgKTzFn1wnIj3/p+1O/Ty5PX1Lnj+NMTAQQ="
[mod."golang.org/x/crypto"]
version = "v0.41.0"
hash = "sha256-o5Di0lsFmYnXl7a5MBTqmN9vXMCRpE9ay71C1Ar8jEY="
version = "v0.46.0"
hash = "sha256-I8N/spcw3/h0DFA+V1WK38HctckWIB9ep93DEVCALxU="
[mod."golang.org/x/exp"]
version = "v0.0.0-20250531010427-b6e5de432a8b"
hash = "sha256-QaFfjyB+pogCkUkJskR9xnXwkCOU828XJRrzwwLm6Ms="
[mod."golang.org/x/mod"]
version = "v0.31.0"
hash = "sha256-ZVNmaZADgM3+30q9rW8q4gP6ySkT7r1eb4vrHIlpCjM="
[mod."golang.org/x/net"]
version = "v0.43.0"
hash = "sha256-bf3iQFrsC8BoarVaS0uSspEFAcr1zHp1uziTtBpwV34="
version = "v0.48.0"
hash = "sha256-oZpddsiJwWCH3Aipa+XXpy7G/xHY5fEagUSok7T0bXE="
[mod."golang.org/x/oauth2"]
version = "v0.30.0"
hash = "sha256-btD7BUtQpOswusZY5qIU90uDo38buVrQ0tmmQ8qNHDg="
[mod."golang.org/x/sync"]
version = "v0.16.0"
hash = "sha256-sqKDRESeMzLe0jWGWltLZL/JIgrn0XaIeBWCzVN3Bks="
version = "v0.19.0"
hash = "sha256-RbRZ+sKZUurOczGhhzOoY/sojTlta3H9XjL4PXX/cno="
[mod."golang.org/x/sys"]
version = "v0.35.0"
hash = "sha256-ZKM8pesQE6NAFZeKQ84oPn5JMhGr8g4TSwLYAsHMGSI="
version = "v0.39.0"
hash = "sha256-dxTBu/JAWUkPbjFIXXRFdhQWyn+YyEpIC+tWqGo0Y6U="
[mod."golang.org/x/text"]
version = "v0.28.0"
hash = "sha256-8UlJniGK+km4Hmrw6XMxELnExgrih7+z8tU26Cntmto="
version = "v0.32.0"
hash = "sha256-9PXtWBKKY9rG4AgjSP4N+I1DhepXhy8SF/vWSIDIoWs="
[mod."golang.org/x/tools"]
version = "v0.40.0"
hash = "sha256-ksmhTnH9btXKiRbbE0KGh02nbeNqNBQKcfwvx9dE7t0="
[mod."google.golang.org/api"]
version = "v0.247.0"
hash = "sha256-UzTtydHmNqh1OXbxcN5qNKQxb5dV6h2Mo6DH4P219Ec="
@@ -353,8 +419,8 @@ schema = 3
version = "v1.74.2"
hash = "sha256-tvYMdfu/ZQZRPZNmnQI4CZpg46CM8+mD49hw0gFheGs="
[mod."google.golang.org/protobuf"]
version = "v1.36.7"
hash = "sha256-6xCU+t2AVPcscMKenVs4etGqutYGPDXCQ3DCD3PpTq4="
version = "v1.36.11"
hash = "sha256-7W+6jntfI/awWL3JP6yQedxqP5S9o3XvPgJ2XxxsIeE="
[mod."gopkg.in/warnings.v0"]
version = "v0.1.2"
hash = "sha256-ATVL9yEmgYbkJ1DkltDGRn/auGAjqGOfjQyBYyUo8s8="

View File

@@ -1 +1 @@
"1.4.331"
"1.4.354"

View File

@@ -18,7 +18,7 @@ FROM alpine:latest
LABEL org.opencontainers.image.description="A Docker image for running the Fabric CLI. See https://github.com/danielmiessler/Fabric/tree/main/scripts/docker for details."
RUN apk add --no-cache ca-certificates \
RUN apk add --no-cache ca-certificates yt-dlp \
&& mkdir -p /root/.config/fabric
COPY --from=builder /fabric /usr/local/bin/fabric

View File

@@ -3,6 +3,7 @@
"""Extracts pattern information from the ~/.config/fabric/patterns directory,
creates JSON files for pattern extracts and descriptions, and updates web static files.
"""
import os
import json
import shutil
@@ -33,7 +34,13 @@ def get_pattern_extract(pattern_path):
def extract_pattern_info():
"""Extract pattern information from the patterns directory"""
script_dir = os.path.dirname(os.path.abspath(__file__))
patterns_dir = os.path.expanduser("~/.config/fabric/patterns")
local_patterns_dir = os.path.join(script_dir, "..", "..", "data", "patterns")
if os.path.exists(local_patterns_dir):
patterns_dir = local_patterns_dir
else:
patterns_dir = os.path.expanduser("~/.config/fabric/patterns")
print(f"\nScanning patterns directory: {patterns_dir}")
extracts_path = os.path.join(script_dir, "pattern_extracts.json")

View File

@@ -1924,6 +1924,14 @@
"tags": [
"VISUALIZE"
]
},
{
"patternName": "concall_summary",
"description": "Extract strategic insights from earnings transcripts for investors.",
"tags": [
"SUMMARIZE",
"BUSINESS"
]
}
]
}

View File

@@ -931,6 +931,10 @@
{
"patternName": "create_conceptmap",
"pattern_extract": "--- ### IDENTITY AND PURPOSE You are an intelligent assistant specialized in **knowledge visualization and educational data structuring**. You are capable of reading unstructured textual content (.txt or .md files), extracting **main concepts, subthemes, and logical relationships**, and transforming them into a **fully interactive conceptual map** built in **HTML using Vis.js (vis-network)**. You understand hierarchical, causal, and correlative relations between ideas and express them through **nodes and directed edges**. You ensure that the resulting HTML file is **autonomous, interactive, and visually consistent** with the Vis.js framework. You are precise, systematic, and maintain semantic coherence between concepts and their relationships. You automatically name the output file according to the **detected topic**, ensuring compatibility and clarity (e.g., `map_hist_china.html`). --- ### TASK You are given a `.txt` or `.md` file containing explanatory, conceptual, or thematic content. Your task is to: 1. **Extract** the main concepts and secondary ideas. 2. **Identify logical or hierarchical relationships** among these concepts using concise action verbs. 3. **Structure the output** as a self-contained, interactive HTML document that visually represents these relationships using the **Vis.js (vis-network)** library. The goal is to generate a **fully functional conceptual map** that can be opened directly in a browser without external dependencies. --- ### ACTIONS 1. **Analyze and Extract Concepts** - Read and process the uploaded `.txt` or `.md` file. - Identify main themes, subthemes, and key terms. - Convert each key concept into a node. 2. **Map Relationships** - Detect logical and hierarchical relations between concepts. - Use short, descriptive verbs such as: \"causes\", \"contributes to\", \"depends on\", \"evolves into\", \"results in\", \"influences\", \"generates\" / \"creates\", \"culminates in. 3. **Generate Node Structure** ```json {\"id\": \"conceito_id\", \"label\": \"Conceito\", \"title\": \"<b>Concept:</b> Conceito<br><i>Drag to position, double-click to release.</i>\"} ``` 4. **Generate Edge Structure** ```json {\"from\": \"conceito_origem\", \"to\": \"conceito_destino\", \"label\": \"verbo\", \"title\": \"<b>Relationship:</b> verbo\"} ``` 5. **Apply Visual and Physical Configuration** ```js shape: \"dot\", color: { border: \"#4285F4\", background: \"#ffffff\", highlight: { border: \"#34A853\", background: \"#e6f4ea\" } }, font: { size: 14, color: \"#3c4043\" }, borderWidth: 2, size: 20 // Edges color: { color: \"#dee2e6\", highlight: \"#34A853\" }, arrows: { to: { enabled: true, scaleFactor: 0.7 } }, font: { align: \"middle\", size: 12, color: \"#5f6368\" }, width: 2 // Physics physics: { solver: \"forceAtlas2Based\", forceAtlas2Based: { gravitationalConstant: -50, centralGravity: 0.005, springLength: 100, springConstant: 0.18 }, maxVelocity: 146, minVelocity: 0.1, stabilization: { iterations: 150 } } ``` 6. **Implement Interactivity** ```js // Fix node on drag end network.on(\"dragEnd\", (params) => { if (params.nodes.length > 0) { nodes.update({ id: params.nodes[0], fixed: true }); } }); // Release node on double click network.on(\"doubleClick\", (params) => { if (params.nodes.length > 0) { nodes.update({ id: params.nodes[0], fixed: false }); } }); ``` 7. 
**Assemble the Complete HTML Structure** ```html <head> <title>Mapa Conceitual — [TEMA DETECTADO DO ARQUIVO]</title> <script src=\"https://unpkg.com/vis-network/standalone/umd/vis-network.min.js\"></script> <link href=\"https://unpkg.com/vis-network/styles/vis-network.min.css\" rel=\"stylesheet\" /> </head> <body> <div id=\"map\"></div> <script type=\"text/javascript\"> // nodes, edges, options, and interactive network initialization </script> </body> ``` 8. **Auto-name Output File** Automatically save the generated HTML file based on the detected topic: ``` mapa_[tema_detectado].html ``` --- ###"
},
{
"patternName": "concall_summary",
"pattern_extract": "# IDENTITY and PURPOSE You are an equity research analyst specializing in earnings and conference call analysis. Your role involves carefully examining transcripts to extract actionable insights that can inform investment decisions. You need to focus on several key areas, including management commentary, analyst questions, financial and operational insights, risks and red flags, hidden signals, and an executive summary. Your task is to distill complex information into clear, concise bullet points, capturing strategic themes, growth drivers, and potential concerns. It is crucial to interpret the tone, identify contradictions, and highlight any subtle cues that may indicate future strategic shifts or risks. Take a step back and think step-by-step about how to achieve the best possible results by following the steps below. # STEPS * Analyze the transcript to extract management commentary, focusing on strategic themes, growth drivers, margin commentary, guidance, tone analysis, and any contradictions or vague areas. * Extract a summary of the content in exactly **25 words**, including who is presenting and the content being discussed; place this under a **SUMMARY** section. * For each analyst's question, determine the underlying concern, summarize managements exact answer, evaluate if the answers address the question fully, and identify anything the management avoided or deflected. * Gather financial and operational insights, including commentary on demand, pricing, capacity, market share, cost inflation, raw material trends, and supply-chain issues. * Identify risks and red flags by noting any negative commentary, early warning signs, unusual wording, delayed responses, repeated disclaimers, and areas where management seemed less confident. * Detect hidden signals such as forward-looking hints, unasked but important questions, and subtle cues about strategy shifts or stress. * Create an executive summary in bullet points, listing the 10 most important takeaways, 3 surprises, and 3 things to track in the next quarter. # OUTPUT STRUCTURE * MANAGEMENT COMMENTARY * Key strategic themes * Growth drivers discussed * Margin commentary * Guidance (explicit + implicit) * Tone analysis (positive/neutral/negative) * Any contradictions or vague areas * ANALYST QUESTIONS (Q&A) * For each analyst (use bullets, one analyst per bullet-group): * Underlying concern (what the question REALLY asked) * Managements exact answer (concise) * Answer completeness (Yes/No — short explanation) * Items management avoided or deflected * FINANCIAL & OPERATIONAL INSIGHTS * Demand, pricing, capacity, market share commentary * Cost inflation, raw material trends, supply-chain issues * Segment-wise performance and commentary (if applicable) * RISKS & RED FLAGS * Negative commentary or early-warning signs * Unusual wording, delayed responses, repeated disclaimers * Areas where management was less confident * HIDDEN SIGNALS * Forward-looking hints and tone shifts * Important topics not asked by analysts but relevant * Subtle cues of strategy change, stress, or opportunity * EXECUTIVE SUMMARY * 10 most important takeaways (bullet points) * 3 surprises (bullet points) * 3 things to track next quarter (bullet points) * SUMMARY (exactly 25 words) * A single 25-word sentence summarizing who presented and what was discussed # OUTPUT INSTRUCTIONS * Only output Markdown. * Provide everything in"
}
]
}

View File

@@ -26,9 +26,9 @@
"eslint-plugin-svelte": "^2.46.1",
"lucide-svelte": "^0.309.0",
"mdsvex": "^0.11.2",
"patch-package": "^8.0.0",
"patch-package": "^8.0.1",
"pdf-to-markdown-core": "github:jzillmann/pdf-to-markdown#modularize",
"pdfjs-dist": "^4.2.67",
"pdfjs-dist": "^5.4.449",
"postcss": "^8.5.3",
"postcss-load-config": "^6.0.1",
"rehype-autolink-headings": "^7.1.0",

web/pnpm-lock.yaml (generated, 565 changes)

File diff suppressed because it is too large

View File

@@ -1,239 +1,284 @@
import { get } from "svelte/store";
import type {
ChatRequest,
StreamResponse,
ChatError as IChatError,
ChatPrompt
} from '$lib/interfaces/chat-interface';
import { get } from 'svelte/store';
import { modelConfig } from '$lib/store/model-store';
import { systemPrompt, selectedPatternName, patternVariables } from '$lib/store/pattern-store';
import { chatConfig } from '$lib/store/chat-config';
import { messageStore } from '$lib/store/chat-store';
import { languageStore } from '$lib/store/language-store';
import { selectedStrategy } from '$lib/store/strategy-store';
ChatPrompt,
ChatRequest,
ChatError as IChatError,
StreamResponse,
} from "$lib/interfaces/chat-interface";
import { chatConfig } from "$lib/store/chat-config";
import { languageStore } from "$lib/store/language-store";
import { modelConfig } from "$lib/store/model-store";
import {
patternVariables,
selectedPatternName,
systemPrompt,
} from "$lib/store/pattern-store";
import { selectedStrategy } from "$lib/store/strategy-store";
class LanguageValidator {
constructor(private targetLanguage: string) {}
constructor(private targetLanguage: string) {}
enforceLanguage(content: string): string {
if (this.targetLanguage === 'en') return content;
return `[Language: ${this.targetLanguage}]\n${content}`;
}
enforceLanguage(content: string): string {
if (this.targetLanguage === "en") return content;
return `[Language: ${this.targetLanguage}]\n${content}`;
}
}
export class ChatError extends Error implements IChatError {
constructor(
message: string,
public readonly code: string = 'CHAT_ERROR',
public readonly details?: unknown
) {
super(message);
this.name = 'ChatError';
}
constructor(
message: string,
public readonly code: string = "CHAT_ERROR",
public readonly details?: unknown,
) {
super(message);
this.name = "ChatError";
}
}
export class ChatService {
private validator: LanguageValidator;
private validator: LanguageValidator;
constructor() {
this.validator = new LanguageValidator(get(languageStore));
}
constructor() {
this.validator = new LanguageValidator(get(languageStore));
}
private async fetchStream(request: ChatRequest): Promise<ReadableStream<StreamResponse>> {
try {
console.log('\n=== ChatService Request Start ===');
console.log('1. Request details:', {
language: get(languageStore),
pattern: get(selectedPatternName),
promptCount: request.prompts?.length,
messageCount: request.messages?.length
});
// NEW: Log the full payload before sending to backend
console.log('Final ChatRequest payload:', JSON.stringify(request, null, 2));
private async fetchStream(
request: ChatRequest,
): Promise<ReadableStream<StreamResponse>> {
try {
console.log("\n=== ChatService Request Start ===");
console.log("1. Request details:", {
language: get(languageStore),
pattern: get(selectedPatternName),
promptCount: request.prompts?.length,
messageCount: request.messages?.length,
});
// NEW: Log the full payload before sending to backend
console.log(
"Final ChatRequest payload:",
JSON.stringify(request, null, 2),
);
const response = await fetch('/api/chat', {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(request),
});
const response = await fetch("/api/chat", {
method: "POST",
headers: { "Content-Type": "application/json" },
body: JSON.stringify(request),
});
if (!response.ok) {
throw new ChatError(`HTTP error! status: ${response.status}`, 'HTTP_ERROR', { status: response.status });
}
if (!response.ok) {
throw new ChatError(
`HTTP error! status: ${response.status}`,
"HTTP_ERROR",
{ status: response.status },
);
}
const reader = response.body?.getReader();
if (!reader) {
throw new ChatError('Response body is null', 'NULL_RESPONSE');
}
const reader = response.body?.getReader();
if (!reader) {
throw new ChatError("Response body is null", "NULL_RESPONSE");
}
return this.createMessageStream(reader);
} catch (error) {
if (error instanceof ChatError) throw error;
throw new ChatError('Failed to fetch chat stream', 'FETCH_ERROR', error);
}
}
return this.createMessageStream(reader);
} catch (error) {
if (error instanceof ChatError) throw error;
throw new ChatError("Failed to fetch chat stream", "FETCH_ERROR", error);
}
}
private cleanPatternOutput(content: string): string {
// Remove markdown fence if present
let cleaned = content.replace(/^```markdown\n/, '');
cleaned = cleaned.replace(/\n```$/, '');
private cleanPatternOutput(content: string): string {
// Remove markdown fence if present
let cleaned = content.replace(/^```markdown\n/, "");
cleaned = cleaned.replace(/\n```$/, "");
// Existing cleaning
cleaned = cleaned.replace(/^# OUTPUT\s*\n/, '');
cleaned = cleaned.replace(/^\s*\n/, '');
cleaned = cleaned.replace(/\n\s*$/, '');
cleaned = cleaned.replace(/^#\s+([A-Z]+):/gm, '$1:');
cleaned = cleaned.replace(/^#\s+([A-Z]+)\s*$/gm, '$1');
cleaned = cleaned.trim();
cleaned = cleaned.replace(/\n{3,}/g, '\n\n');
return cleaned;
}
// Existing cleaning
cleaned = cleaned.replace(/^# OUTPUT\s*\n/, "");
cleaned = cleaned.replace(/^\s*\n/, "");
cleaned = cleaned.replace(/\n\s*$/, "");
cleaned = cleaned.replace(/^#\s+([A-Z]+):/gm, "$1:");
cleaned = cleaned.replace(/^#\s+([A-Z]+)\s*$/gm, "$1");
cleaned = cleaned.trim();
cleaned = cleaned.replace(/\n{3,}/g, "\n\n");
return cleaned;
}
private createMessageStream(reader: ReadableStreamDefaultReader<Uint8Array>): ReadableStream<StreamResponse> {
let buffer = '';
const cleanPatternOutput = this.cleanPatternOutput.bind(this);
const language = get(languageStore);
const validator = new LanguageValidator(language);
private createMessageStream(
reader: ReadableStreamDefaultReader<Uint8Array>,
): ReadableStream<StreamResponse> {
let buffer = "";
const cleanPatternOutput = this.cleanPatternOutput.bind(this);
const language = get(languageStore);
const validator = new LanguageValidator(language);
const processResponse = (response: StreamResponse) => {
const pattern = get(selectedPatternName);
const processResponse = (response: StreamResponse) => {
const pattern = get(selectedPatternName);
if (pattern) {
response.content = cleanPatternOutput(response.content);
// Simplified format determination - always markdown unless mermaid
const isMermaid = [
'graph TD', 'gantt', 'flowchart',
'sequenceDiagram', 'classDiagram', 'stateDiagram'
].some(starter => response.content.trim().startsWith(starter));
if (pattern) {
response.content = cleanPatternOutput(response.content);
// Simplified format determination - always markdown unless mermaid
const isMermaid = [
"graph TD",
"gantt",
"flowchart",
"sequenceDiagram",
"classDiagram",
"stateDiagram",
].some((starter) => response.content.trim().startsWith(starter));
response.format = isMermaid ? 'mermaid' : 'markdown';
}
response.format = isMermaid ? "mermaid" : "markdown";
}
if (response.type === 'content') {
response.content = validator.enforceLanguage(response.content);
}
if (response.type === "content") {
response.content = validator.enforceLanguage(response.content);
}
return response;
};
return new ReadableStream({
async start(controller) {
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
return response;
};
return new ReadableStream({
async start(controller) {
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
buffer += new TextDecoder().decode(value);
const messages = buffer.split('\n\n').filter(msg => msg.startsWith('data: '));
buffer += new TextDecoder().decode(value);
const messages = buffer
.split("\n\n")
.filter((msg) => msg.startsWith("data: "));
if (messages.length > 1) {
buffer = messages.pop() || '';
for (const msg of messages) {
try {
let response = JSON.parse(msg.slice(6)) as StreamResponse;
response = processResponse(response);
controller.enqueue(response);
} catch (parseError) {
console.error('Error parsing stream message:', parseError);
}
}
}
}
if (messages.length > 1) {
buffer = messages.pop() || "";
for (const msg of messages) {
try {
let response = JSON.parse(msg.slice(6)) as StreamResponse;
response = processResponse(response);
controller.enqueue(response);
} catch (parseError) {
console.error("Error parsing stream message:", parseError);
}
}
}
}
if (buffer.startsWith('data: ')) {
try {
let response = JSON.parse(buffer.slice(6)) as StreamResponse;
response = processResponse(response);
controller.enqueue(response);
} catch (parseError) {
console.error('Error parsing final message:', parseError);
}
}
} catch (error) {
controller.error(new ChatError('Stream processing error', 'STREAM_ERROR', error));
} finally {
reader.releaseLock();
controller.close();
}
},
cancel() {
reader.cancel();
}
});
}
if (buffer.startsWith("data: ")) {
try {
let response = JSON.parse(buffer.slice(6)) as StreamResponse;
response = processResponse(response);
controller.enqueue(response);
} catch (parseError) {
console.error("Error parsing final message:", parseError);
}
}
} catch (error) {
controller.error(
new ChatError("Stream processing error", "STREAM_ERROR", error),
);
} finally {
reader.releaseLock();
controller.close();
}
},
cancel() {
reader.cancel();
},
});
}
private createChatPrompt(userInput: string, systemPromptText?: string): ChatPrompt {
const config = get(modelConfig);
const language = get(languageStore);
private createChatPrompt(
userInput: string,
systemPromptText?: string,
): ChatPrompt {
const config = get(modelConfig);
const language = get(languageStore);
const languageInstruction = language !== 'en'
? `You MUST respond in ${language} language. All output must be in ${language}. `
// ? `You MUST respond in ${language} language. ALL output, including section headers, titles, and formatting, MUST be translated into ${language}. It is CRITICAL that you translate ALL headers, such as SUMMARY, IDEAS, QUOTES, TAKEAWAYS, MAIN POINTS, etc., into ${language}. Maintain markdown formatting in the response. Do not output any English headers.`
: '';
const languageInstruction =
language !== "en"
? `You MUST respond in ${language} language. All output must be in ${language}. `
: // ? `You MUST respond in ${language} language. ALL output, including section headers, titles, and formatting, MUST be translated into ${language}. It is CRITICAL that you translate ALL headers, such as SUMMARY, IDEAS, QUOTES, TAKEAWAYS, MAIN POINTS, etc., into ${language}. Maintain markdown formatting in the response. Do not output any English headers.`
"";
const finalSystemPrompt = languageInstruction + (systemPromptText ?? get(systemPrompt));
const finalSystemPrompt =
languageInstruction + (systemPromptText ?? get(systemPrompt));
const finalUserInput = language !== 'en'
? `${userInput}\n\nIMPORTANT: Respond in ${language} language only.`
: userInput;
const finalUserInput =
language !== "en"
? `${userInput}\n\nIMPORTANT: Respond in ${language} language only.`
: userInput;
return {
userInput: finalUserInput,
systemPrompt: finalSystemPrompt,
model: config.model,
patternName: get(selectedPatternName),
strategyName: get(selectedStrategy), // Add selected strategy to prompt
variables: get(patternVariables) // Add pattern variables
};
}
public async createChatRequest(userInput: string, systemPromptText?: string, isPattern: boolean = false): Promise<ChatRequest> {
const prompt = this.createChatPrompt(userInput, systemPromptText);
const config = get(chatConfig);
const language = get(languageStore);
return {
prompts: [prompt],
messages: [],
language: language, // Add language at the top level for backend compatibility
...config
};
}
public async streamPattern(userInput: string, systemPromptText?: string): Promise<ReadableStream<StreamResponse>> {
const request = await this.createChatRequest(userInput, systemPromptText, true);
return this.fetchStream(request);
}
public async streamChat(userInput: string, systemPromptText?: string): Promise<ReadableStream<StreamResponse>> {
const request = await this.createChatRequest(userInput, systemPromptText);
return this.fetchStream(request);
}
public async processStream(
stream: ReadableStream<StreamResponse>,
onContent: (content: string, response?: StreamResponse) => void,
onError: (error: Error) => void
): Promise<void> {
const reader = stream.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
if (value.type === 'error') {
throw new ChatError(value.content, 'STREAM_CONTENT_ERROR');
}
if (value.type === 'content') {
onContent(value.content, value);
}
}
} catch (error) {
onError(error instanceof ChatError ? error : new ChatError('Stream processing error', 'STREAM_ERROR', error));
} finally {
reader.releaseLock();
}
}
return {
userInput: finalUserInput,
systemPrompt: finalSystemPrompt,
model: config.model,
patternName: get(selectedPatternName),
strategyName: get(selectedStrategy), // Add selected strategy to prompt
variables: get(patternVariables), // Add pattern variables
};
}
public async createChatRequest(
userInput: string,
systemPromptText?: string,
isPattern: boolean = false,
): Promise<ChatRequest> {
const prompt = this.createChatPrompt(userInput, systemPromptText);
const config = get(chatConfig);
const language = get(languageStore);
return {
prompts: [prompt],
messages: [],
language: language, // Add language at the top level for backend compatibility
...config,
};
}
public async streamPattern(
userInput: string,
systemPromptText?: string,
): Promise<ReadableStream<StreamResponse>> {
const request = await this.createChatRequest(
userInput,
systemPromptText,
true,
);
return this.fetchStream(request);
}
public async streamChat(
userInput: string,
systemPromptText?: string,
): Promise<ReadableStream<StreamResponse>> {
const request = await this.createChatRequest(userInput, systemPromptText);
return this.fetchStream(request);
}
public async processStream(
stream: ReadableStream<StreamResponse>,
onContent: (content: string, response?: StreamResponse) => void,
onError: (error: Error) => void,
): Promise<void> {
const reader = stream.getReader();
try {
while (true) {
const { done, value } = await reader.read();
if (done) break;
if (value.type === "error") {
throw new ChatError(value.content, "STREAM_CONTENT_ERROR");
}
if (value.type === "content") {
onContent(value.content, value);
}
}
} catch (error) {
onError(
error instanceof ChatError
? error
: new ChatError("Stream processing error", "STREAM_ERROR", error),
);
} finally {
reader.releaseLock();
}
}
}
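For orientation, a minimal consumption sketch of the reformatted ChatService follows. The component wiring is hypothetical and assumes the /api/chat SSE backend is reachable; streamChat, processStream, and ChatError themselves come from the code above.

// Hypothetical usage sketch (not part of this diff): stream a reply and log the partial content.
import { ChatError, ChatService } from "$lib/services/ChatService";

const chatService = new ChatService();

async function runChat(userInput: string): Promise<void> {
  try {
    const stream = await chatService.streamChat(userInput);
    await chatService.processStream(
      stream,
      (content) => console.log("assistant content so far:", content),
      (error) => console.error("stream error:", error),
    );
  } catch (error) {
    if (error instanceof ChatError) console.error(error.code, error.message);
    else throw error;
  }
}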


@@ -1,78 +1,74 @@
import { createPipeline, transformers } from 'pdf-to-markdown-core/lib/src';
import { PARSE_SCHEMA } from 'pdf-to-markdown-core/lib/src/PdfParser';
import * as pdfjs from 'pdfjs-dist';
import pdfConfig from './pdf-config';
import { createPipeline, transformers } from "pdf-to-markdown-core/lib/src";
import { PARSE_SCHEMA } from "pdf-to-markdown-core/lib/src/PdfParser";
// pdfjs-dist v5+ requires browser APIs at import time, so we use dynamic imports
let pdfjs: typeof import("pdfjs-dist") | null = null;
export class PdfConversionService {
constructor() {
if (typeof window !== 'undefined') {
console.log('PDF.js version:', pdfjs.version);
// Initialize PDF.js configuration from the shared config
pdfConfig.initialize();
console.log('Worker configuration complete');
}
}
private async ensureInitialized(): Promise<typeof import("pdfjs-dist")> {
if (!pdfjs) {
// Dynamic import to avoid SSR issues with pdfjs-dist v5+
pdfjs = await import("pdfjs-dist");
const pdfConfig = (await import("./pdf-config")).default;
console.log("PDF.js version:", pdfjs.version);
await pdfConfig.initialize();
console.log("Worker configuration complete");
}
return pdfjs;
}
async convertToMarkdown(file: File): Promise<string> {
console.log('Starting PDF conversion:', {
fileName: file.name,
fileSize: file.size
});
async convertToMarkdown(file: File): Promise<string> {
console.log("Starting PDF conversion:", {
fileName: file.name,
fileSize: file.size,
});
const buffer = await file.arrayBuffer();
console.log('Buffer created:', buffer.byteLength);
const pdfjsLib = await this.ensureInitialized();
const pipeline = createPipeline(pdfjs, {
transformConfig: {
transformers
}
});
console.log('Pipeline created');
const buffer = await file.arrayBuffer();
console.log("Buffer created:", buffer.byteLength);
const result = await pipeline.parse(
buffer,
(progress) => console.log('Processing:', {
stage: progress.stages,
details: progress.stageDetails,
progress: progress.stageProgress
})
);
console.log('Parse complete, validating result');
const pipeline = createPipeline(pdfjsLib, {
transformConfig: {
transformers,
},
});
console.log("Pipeline created");
const transformed = result.transform();
console.log('Transform applied:', transformed);
const result = await pipeline.parse(buffer, (progress) =>
console.log("Processing:", {
stage: progress.stages,
details: progress.stageDetails,
progress: progress.stageProgress,
}),
);
console.log("Parse complete, validating result");
const markdown = transformed.convert({
convert: (items) => {
console.log('PDF Structure:', {
itemCount: items.length,
firstItem: items[0],
schema: PARSE_SCHEMA // ['transform', 'width', 'height', 'str', 'fontName', 'dir']
});
const text = items
.map(item => item.value('str')) // Using 'str' instead of 'text' based on PARSE_SCHEMA
.filter(Boolean)
.join('\n');
console.log('Converted text:', {
length: text.length,
preview: text.substring(0, 100)
});
return text;
}
});
const transformed = result.transform();
console.log("Transform applied:", transformed);
return markdown;
}
const markdown = transformed.convert({
convert: (items) => {
console.log("PDF Structure:", {
itemCount: items.length,
firstItem: items[0],
schema: PARSE_SCHEMA, // ['transform', 'width', 'height', 'str', 'fontName', 'dir']
});
const text = items
.map((item) => item.value("str")) // Using 'str' instead of 'text' based on PARSE_SCHEMA
.filter(Boolean)
.join("\n");
console.log("Converted text:", {
length: text.length,
preview: text.substring(0, 100),
});
return text;
},
});
return markdown;
}
}
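A brief usage sketch of the now lazily initialized conversion service; the file-input handling and the import path are assumptions, while convertToMarkdown comes from the class above.

// Hypothetical browser-side sketch: convert a user-selected PDF to Markdown.
import { PdfConversionService } from "$lib/services/PdfConversionService"; // path assumed

const pdfService = new PdfConversionService();

async function handlePdfUpload(input: HTMLInputElement): Promise<string | null> {
  const file = input.files?.[0];
  if (!file) return null;
  // pdfjs-dist is dynamically imported on first use, so this only works in the browser.
  return pdfService.convertToMarkdown(file);
}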


@@ -1,20 +1,19 @@
import { browser } from '$app/environment';
import { GlobalWorkerOptions } from 'pdfjs-dist';
import { browser } from "$app/environment";
// Set up the worker source location - point to static file in public directory
const workerSrc = '/pdf.worker.min.mjs';
// Configure the worker options only on the client side
if (browser) {
GlobalWorkerOptions.workerSrc = workerSrc;
}
// Export the configuration
// Export the configuration - initialize() dynamically imports pdfjs-dist to avoid a top-level import
// This is necessary because pdfjs-dist v5+ uses browser APIs at import time
export default {
initialize: () => {
if (browser) {
console.log('PDF.js worker initialized at', workerSrc);
}
}
};
initialize: async () => {
if (browser) {
// Dynamic import to avoid SSR issues
const pdfjs = await import("pdfjs-dist");
const { GlobalWorkerOptions, version } = pdfjs;
// Use CDN-hosted worker to avoid bundling third-party minified code in the repo
const workerSrc = `https://unpkg.com/pdfjs-dist@${version}/build/pdf.worker.min.mjs`;
GlobalWorkerOptions.workerSrc = workerSrc;
console.log(`PDF.js worker v${version} initialized from CDN`);
}
},
};


@@ -1,19 +1,24 @@
import { writable, derived, get } from 'svelte/store';
import type { ChatState, Message, StreamResponse } from '$lib/interfaces/chat-interface';
import { ChatService, ChatError } from '$lib/services/ChatService';
import { languageStore } from '$lib/store/language-store';
import { selectedPatternName } from '$lib/store/pattern-store';
import { derived, get, writable } from "svelte/store";
import { browser } from "$app/environment";
import type {
ChatState,
Message,
StreamResponse,
} from "$lib/interfaces/chat-interface";
import { ChatError, ChatService } from "$lib/services/ChatService";
import { languageStore } from "$lib/store/language-store";
import { selectedPatternName } from "$lib/store/pattern-store";
// Initialize chat service
const chatService = new ChatService();
// Local storage key for persisting messages
const MESSAGES_STORAGE_KEY = 'chat_messages';
const MESSAGES_STORAGE_KEY = "chat_messages";
// Load initial messages from local storage
const initialMessages = typeof localStorage !== 'undefined'
? JSON.parse(localStorage.getItem(MESSAGES_STORAGE_KEY) || '[]')
: [];
// Load initial messages from local storage (only in browser)
const initialMessages = browser
? JSON.parse(localStorage.getItem(MESSAGES_STORAGE_KEY) || "[]")
: [];
// Separate stores for different concerns
export const messageStore = writable<Message[]>(initialMessages);
@@ -21,134 +26,144 @@ export const streamingStore = writable<boolean>(false);
export const errorStore = writable<string | null>(null);
export const currentSession = writable<string | null>(null);
// Subscribe to messageStore changes to persist messages
if (typeof localStorage !== 'undefined') {
messageStore.subscribe($messages => {
localStorage.setItem(MESSAGES_STORAGE_KEY, JSON.stringify($messages));
});
// Subscribe to messageStore changes to persist messages (only in browser)
if (browser) {
messageStore.subscribe(($messages) => {
localStorage.setItem(MESSAGES_STORAGE_KEY, JSON.stringify($messages));
});
}
// Derived store for chat state
export const chatState = derived(
[messageStore, streamingStore],
([$messages, $streaming]) => ({
messages: $messages,
isStreaming: $streaming
})
[messageStore, streamingStore],
([$messages, $streaming]) => ({
messages: $messages,
isStreaming: $streaming,
}),
);
// Error handling utility
function handleError(error: Error | string) {
const errorMessage = error instanceof ChatError
? `${error.code}: ${error.message}`
: error instanceof Error
? error.message
: error;
const errorMessage =
error instanceof ChatError
? `${error.code}: ${error.message}`
: error instanceof Error
? error.message
: error;
errorStore.set(errorMessage);
streamingStore.set(false);
return errorMessage;
errorStore.set(errorMessage);
streamingStore.set(false);
return errorMessage;
}
export const setSession = (sessionName: string | null) => {
currentSession.set(sessionName);
if (!sessionName) {
clearMessages();
}
currentSession.set(sessionName);
if (!sessionName) {
clearMessages();
}
};
export const clearMessages = () => {
messageStore.set([]);
errorStore.set(null);
if (typeof localStorage !== 'undefined') {
localStorage.removeItem(MESSAGES_STORAGE_KEY);
}
messageStore.set([]);
errorStore.set(null);
if (typeof localStorage !== "undefined") {
localStorage.removeItem(MESSAGES_STORAGE_KEY);
}
};
export const revertLastMessage = () => {
messageStore.update(messages => messages.slice(0, -1));
messageStore.update((messages) => messages.slice(0, -1));
};
export async function sendMessage(
content: string,
systemPromptText?: string,
isSystem: boolean = false,
) {
try {
console.log("\n=== Message Processing Start ===");
console.log("1. Initial state:", {
isSystem,
hasSystemPrompt: !!systemPromptText,
currentLanguage: get(languageStore),
pattern: get(selectedPatternName),
});
export async function sendMessage(content: string, systemPromptText?: string, isSystem: boolean = false) {
try {
console.log('\n=== Message Processing Start ===');
console.log('1. Initial state:', {
isSystem,
hasSystemPrompt: !!systemPromptText,
currentLanguage: get(languageStore),
pattern: get(selectedPatternName)
});
const $streaming = get(streamingStore);
if ($streaming) {
throw new ChatError(
"Message submission blocked - already streaming",
"STREAMING_BLOCKED",
);
}
const $streaming = get(streamingStore);
if ($streaming) {
throw new ChatError('Message submission blocked - already streaming', 'STREAMING_BLOCKED');
}
streamingStore.set(true);
errorStore.set(null);
streamingStore.set(true);
errorStore.set(null);
// Add message
messageStore.update((messages) => [
...messages,
{
role: isSystem ? "system" : "user",
content,
},
]);
// Add message
messageStore.update(messages => [...messages, {
role: isSystem ? 'system' : 'user',
content
}]);
console.log("2. Message added:", {
role: isSystem ? "system" : "user",
language: get(languageStore),
});
console.log('2. Message added:', {
role: isSystem ? 'system' : 'user',
language: get(languageStore)
});
if (!isSystem) {
console.log("3. Preparing chat stream:", {
language: get(languageStore),
pattern: get(selectedPatternName),
hasSystemPrompt: !!systemPromptText,
});
if (!isSystem) {
console.log('3. Preparing chat stream:', {
language: get(languageStore),
pattern: get(selectedPatternName),
hasSystemPrompt: !!systemPromptText
});
const stream = await chatService.streamChat(content, systemPromptText);
console.log("4. Stream created");
const stream = await chatService.streamChat(content, systemPromptText);
console.log('4. Stream created');
await chatService.processStream(
stream,
(content: string, response?: StreamResponse) => {
messageStore.update((messages) => {
const newMessages = [...messages];
const lastMessage = newMessages[newMessages.length - 1];
await chatService.processStream(
stream,
(content: string, response?: StreamResponse) => {
messageStore.update(messages => {
const newMessages = [...messages];
const lastMessage = newMessages[newMessages.length - 1];
if (lastMessage?.role === "assistant") {
lastMessage.content = content;
lastMessage.format = response?.format;
console.log("Message updated:", {
role: "assistant",
format: lastMessage.format,
});
} else {
newMessages.push({
role: "assistant",
content,
format: response?.format,
});
}
if (lastMessage?.role === 'assistant') {
lastMessage.content = content;
lastMessage.format = response?.format;
console.log('Message updated:', {
role: 'assistant',
format: lastMessage.format
});
} else {
newMessages.push({
role: 'assistant',
content,
format: response?.format
});
}
return newMessages;
});
},
(error) => {
handleError(error);
},
);
}
return newMessages;
});
},
(error) => {
handleError(error);
}
);
}
streamingStore.set(false);
} catch (error) {
if (error instanceof Error) {
handleError(error);
} else {
handleError(String(error));
}
throw error;
}
streamingStore.set(false);
} catch (error) {
if (error instanceof Error) {
handleError(error);
} else {
handleError(String(error));
}
throw error;
}
}
// Re-export types for convenience


@@ -1,13 +1,14 @@
import { writable } from 'svelte/store';
import { browser } from '$app/environment';
// Load favorites from localStorage if available
const storedFavorites = typeof localStorage !== 'undefined'
const storedFavorites = browser
? JSON.parse(localStorage.getItem('favoritePatterns') || '[]')
: [];
const createFavoritesStore = () => {
const { subscribe, set, update } = writable<string[]>(storedFavorites);
return {
subscribe,
toggleFavorite: (patternName: string) => {
@@ -17,7 +18,7 @@ const createFavoritesStore = () => {
: [...favorites, patternName];
// Save to localStorage
if (typeof localStorage !== 'undefined') {
if (browser) {
localStorage.setItem('favoritePatterns', JSON.stringify(newFavorites));
}
@@ -26,11 +27,11 @@ const createFavoritesStore = () => {
},
reset: () => {
set([]);
if (typeof localStorage !== 'undefined') {
if (browser) {
localStorage.removeItem('favoritePatterns');
}
}
};
};
export const favorites = createFavoritesStore();
export const favorites = createFavoritesStore();
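A small sketch of the favorites store API after the browser-guard change; the import path and the pattern name are placeholders.

// Hypothetical usage sketch for the SSR-safe favorites store.
import { favorites } from "$lib/store/favorites"; // path assumed

favorites.toggleFavorite("concall_summary"); // adds the pattern, or removes it if already present
const unsubscribe = favorites.subscribe((names) => {
  console.log("favorite patterns:", names);
});
unsubscribe();
// favorites.reset() clears the store and the persisted "favoritePatterns" entry.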


@@ -1924,6 +1924,14 @@
"tags": [
"VISUALIZE"
]
},
{
"patternName": "concall_summary",
"description": "Extract strategic insights from earnings transcripts for investors.",
"tags": [
"SUMMARIZE",
"BUSINESS"
]
}
]
}
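To exercise the new concall_summary entry end-to-end, a hedged sketch follows; it assumes selectedPatternName is a writable Svelte store and that selecting a pattern also loads its system prompt into the systemPrompt store, neither of which is shown in this diff.

// Hypothetical sketch: run the concall_summary pattern through the web ChatService.
import { ChatService } from "$lib/services/ChatService";
import { selectedPatternName } from "$lib/store/pattern-store";

async function summarizeConcall(transcript: string): Promise<void> {
  selectedPatternName.set("concall_summary"); // assumes a writable store
  const chatService = new ChatService();
  const stream = await chatService.streamPattern(transcript);
  await chatService.processStream(
    stream,
    (content) => console.log(content),
    (error) => console.error(error),
  );
}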

File diff suppressed because one or more lines are too long