Compare commits

...

298 Commits

Author SHA1 Message Date
github-actions[bot]
cb3f8ed43d Update version to v1.4.238 and commit 2025-07-07 10:24:00 +00:00
Kayvan Sylvan
4c1803cb6d Merge pull request #1591 from ksylvan/0707-anthropic-can-now-use-only-oauth
Improved Anthropic Plugin Configuration Logic
2025-07-07 03:22:27 -07:00
Kayvan Sylvan
d1c614d44e refactor: extract vendor token identifier constant and remove redundant configure call
## CHANGES

- Extract vendor token identifier into named constant
- Remove redundant Configure() call from IsConfigured method
- Use constant for token validation consistency
- Improve code maintainability with centralized identifier
2025-07-07 03:16:45 -07:00
Kayvan Sylvan
dbaa0b9754 feat: add vendor configuration validation and OAuth auto-authentication
## CHANGES

- Add IsConfigured check to vendor configuration loop
- Implement IsConfigured method for Anthropic client validation
- Remove conditional API key requirement based on OAuth
- Add automatic OAuth flow when no valid token
- Validate both API key and OAuth token configurations
- Simplify API key setup question logic
- Add token expiration checking with 5-minute buffer
2025-07-07 02:49:27 -07:00
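
The commit above combines an API-key check with OAuth-token validation using a 5-minute expiration buffer. A minimal sketch of that logic, using hypothetical stand-in types rather than Fabric's actual structs:

```go
import "time"

// tokenExpiryBuffer mirrors the 5-minute buffer named in the commit.
const tokenExpiryBuffer = 5 * time.Minute

// oauthToken is a hypothetical stand-in for the stored OAuth credentials.
type oauthToken struct {
	AccessToken string
	ExpiresAt   time.Time
}

// valid reports whether a token exists and will not expire within the buffer.
func (t *oauthToken) valid() bool {
	return t != nil && t.AccessToken != "" &&
		time.Now().Add(tokenExpiryBuffer).Before(t.ExpiresAt)
}

type Client struct {
	APIKey string
	Token  *oauthToken
}

// IsConfigured reports whether either credential is usable; when it
// returns false, the client can fall through to the automatic OAuth flow.
func (c *Client) IsConfigured() bool {
	return c.APIKey != "" || c.Token.valid()
}
```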
github-actions[bot]
4cfe2375ab Update version to v1.4.237 and commit 2025-07-07 03:05:51 +00:00
Kayvan Sylvan
2b371b69c7 Merge pull request #1590 from ksylvan/0706-webui-topp-fix
Do not pass non-default TopP values
2025-07-06 20:04:16 -07:00
Kayvan Sylvan
6222a613e4 fix: add conditional check for TopP parameter in OpenAI client
## CHANGES

- Add zero-value check before setting TopP parameter
- Prevent sending TopP when value is zero
- Apply fix to the chat completions method
- Apply fix to the response parameters method
- Ensure consistent parameter handling across OpenAI calls
2025-07-06 19:53:21 -07:00
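
The guard itself is a one-liner: attach TopP only when it differs from the zero value, so the provider default applies otherwise. A sketch with a simplified request struct standing in for the real SDK type:

```go
type chatParams struct {
	Model string
	TopP  *float64 // nil means "let the provider use its default"
}

func buildParams(model string, topP float64) chatParams {
	p := chatParams{Model: model}
	if topP != 0 { // only send TopP when a non-default value was set
		p.TopP = &topP
	}
	return p
}
```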
github-actions[bot]
0882c43532 Update version to v1.4.236 and commit 2025-07-06 19:39:24 +00:00
Kayvan Sylvan
f0e1a1b77f Merge pull request #1587 from ksylvan/0705-enhance-bug-report-template
Enhance bug report template
2025-07-06 12:37:56 -07:00
Kayvan Sylvan
a774f991ab chore: enhance bug report template with detailed system info and installation method fields
## CHANGES

- Add detailed instructions for bug reproduction steps
- Include operating system dropdown with specific architectures
- Add OS version textarea with command examples
- Create installation method dropdown with all options
- Replace version checkbox with proper version output field
- Improve formatting and organization of form sections
- Add helpful links to installation documentation
2025-07-06 11:01:53 -07:00
github-actions[bot]
a40bacaf34 Update version to v1.4.235 and commit 2025-07-06 10:36:33 +00:00
Kayvan Sylvan
969b85380c Merge pull request #1586 from ksylvan/0705-another-fix-for-cistom-directory
Fix to persist the CUSTOM_PATTERNS_DIRECTORY variable
2025-07-06 03:35:05 -07:00
Kayvan Sylvan
e8fe4434db fix: make custom patterns persist correctly 2025-07-06 03:29:10 -07:00
github-actions[bot]
7c7ceca264 Update version to v1.4.234 and commit 2025-07-06 09:00:46 +00:00
Kayvan Sylvan
c19d7ccd9d Merge pull request #1581 from ksylvan/0705-custom-directory-creation-bug
Fix Custom Patterns Directory Creation Logic
2025-07-06 01:59:19 -07:00
Kayvan Sylvan
bd0c5f730e chore: improve directory creation logic in configure method
### CHANGES

- Add `fmt` package for logging errors
- Check directory existence before creating
- Log error without clearing directory value
2025-07-06 01:55:18 -07:00
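
A sketch of that creation logic under the stated behavior: check existence first, and on failure log without clearing the configured value. The helper name is illustrative:

```go
import (
	"fmt"
	"os"
)

func ensurePatternsDir(dir string) {
	if _, err := os.Stat(dir); os.IsNotExist(err) {
		if mkErr := os.MkdirAll(dir, 0o755); mkErr != nil {
			// log and keep going; the directory value is deliberately
			// left intact rather than being cleared
			fmt.Printf("could not create %s: %v\n", dir, mkErr)
		}
	}
}
```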
github-actions[bot]
5900dac58f Update version to v1.4.233 and commit 2025-07-06 08:36:45 +00:00
Kayvan Sylvan
237219c3cc Merge pull request #1580 from ksylvan/0705-fix-custom-pattern-loading
Alphabetical Pattern Sorting and Configuration Refactor
2025-07-06 01:35:19 -07:00
Kayvan Sylvan
26fd700098 refactor: move custom patterns directory initialization to Configure method
- Move custom patterns directory logic to Configure method
- Initialize CustomPatternsDir after loading .env file
- Add alphabetical sorting to pattern names retrieval
- Override ListNames method for PatternsEntity class
- Improve pattern listing with proper error handling
- Ensure custom patterns loaded after environment configuration
2025-07-06 01:30:12 -07:00
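
The alphabetical listing reduces to read, filter, sort. The real override lives on PatternsEntity; this free function just shows the shape:

```go
import (
	"os"
	"sort"
)

func listPatternNames(patternsDir string) ([]string, error) {
	entries, err := os.ReadDir(patternsDir)
	if err != nil {
		return nil, err
	}
	names := make([]string, 0, len(entries))
	for _, e := range entries {
		if e.IsDir() { // each pattern is a directory
			names = append(names, e.Name())
		}
	}
	sort.Strings(names) // alphabetical, matching the new ListNames behavior
	return names, nil
}
```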
Kayvan Sylvan
6bd926dd0f Merge pull request #1578 from ksylvan/0705-custom-pattern-readme
Document Custom Patterns Directory Support
2025-07-06 00:38:01 -07:00
Kayvan Sylvan
16ac519415 docs: add comprehensive custom patterns setup and usage guide
## CHANGES

- Add custom patterns directory setup instructions
- Document priority system for custom vs built-in patterns
- Include step-by-step custom pattern creation workflow
- Explain update-safe custom pattern storage
- Add table of contents entries for new sections
- Document seamless integration with existing fabric commands
- Clarify privacy and precedence behavior for custom patterns
2025-07-06 00:32:54 -07:00
github-actions[bot]
a32cc5fa01 Update version to v1.4.232 and commit 2025-07-06 07:22:25 +00:00
Daniel Miessler 🛡️
26b5bb2e9e Merge pull request #1577 from ksylvan/0705-custom-patterns-dir
Add Custom Patterns Directory Support
2025-07-06 00:20:58 -07:00
Kayvan Sylvan
b751d323b1 feat: add custom patterns directory support with environment variable configuration
## CHANGES

- Add custom patterns directory support via environment variable
- Implement custom patterns plugin with registry integration
- Override main patterns with custom directory patterns
- Expand home directory paths in custom patterns config
- Add comprehensive test coverage for custom patterns functionality
- Integrate custom patterns into plugin setup workflow
- Support pattern precedence with custom over main patterns
2025-07-05 23:51:43 -07:00
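
A minimal sketch of the two pieces this commit describes, assuming the CUSTOM_PATTERNS_DIRECTORY variable referenced elsewhere in this log; the map-based merge is an illustration of the precedence rule, not Fabric's actual registry code:

```go
import (
	"os"
	"path/filepath"
	"strings"
)

// resolveCustomPatternsDir reads the directory from the environment and
// expands a leading "~" to the user's home directory.
func resolveCustomPatternsDir() string {
	dir := os.Getenv("CUSTOM_PATTERNS_DIRECTORY")
	if strings.HasPrefix(dir, "~") {
		if home, err := os.UserHomeDir(); err == nil {
			dir = filepath.Join(home, strings.TrimPrefix(dir, "~"))
		}
	}
	return dir
}

// mergePatterns overlays custom patterns onto the built-in set, so a
// custom pattern with the same name takes precedence.
func mergePatterns(builtin, custom map[string]string) map[string]string {
	merged := make(map[string]string, len(builtin)+len(custom))
	for k, v := range builtin {
		merged[k] = v
	}
	for k, v := range custom {
		merged[k] = v // custom overrides main
	}
	return merged
}
```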
github-actions[bot]
d081fd269c Update version to v1.4.231 and commit 2025-07-05 22:25:30 +00:00
Kayvan Sylvan
369a0a850d Merge pull request #1565 from ksylvan/0701-claude-oauth-support
OAuth Authentication Support for Anthropic
2025-07-05 15:23:56 -07:00
Kayvan Sylvan
8dc5343ee6 fix: remove duplicate API key setup question in Anthropic client 2025-07-05 15:05:21 -07:00
Kayvan Sylvan
eda552dac5 refactor: extract OAuth functionality from anthropic client to separate module
## CHANGES

- Remove OAuth transport implementation from main client
- Extract OAuth flow functions to separate module
- Remove unused imports and constants from client
- Replace inline OAuth transport with NewOAuthTransport call
- Update runOAuthFlow to exported RunOAuthFlow function
- Clean up token management and refresh logic
- Simplify client configuration by removing OAuth internals
2025-07-05 14:59:38 -07:00
Kayvan Sylvan
f13a56685b feat: add OAuth login support for Anthropic API configuration 2025-07-05 14:46:43 -07:00
Kayvan Sylvan
2f9afe0247 feat: remove OAuth flow functions for simplified token handling 2025-07-05 11:45:25 -07:00
Kayvan Sylvan
1ec525ad97 chore: simplify base URL configuration in configure method
### CHANGES

- Remove redundant base URL trimming logic
- Append base URL directly without modification
- Eliminate conditional check for API version suffix
2025-07-05 11:39:35 -07:00
Kayvan Sylvan
b7dc6748e0 feat: enhance OAuth authentication flow with automatic re-authentication and timeout handling
## CHANGES

- Add automatic OAuth flow initiation when no token exists
- Implement fallback re-authentication when token refresh fails
- Add timeout contexts for OAuth and refresh operations
- Create context-aware OAuth flow and token exchange functions
- Enhance error handling with graceful authentication recovery
- Add user input timeout protection for authorization codes
- Preserve refresh tokens during token exchange operations
2025-07-05 09:59:27 -07:00
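
The timeout contexts amount to wrapping the interactive flow, as sketched below; the 5-minute value and callback signature are illustrative, not the commit's actual numbers:

```go
import (
	"context"
	"time"
)

// runOAuthWithTimeout bounds the OAuth hand-off so a stalled browser
// redirect or an unanswered authorization-code prompt cannot hang the CLI.
func runOAuthWithTimeout(flow func(ctx context.Context) (string, error)) (string, error) {
	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Minute)
	defer cancel()
	return flow(ctx)
}
```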
Kayvan Sylvan
f1b612d828 refactor: remove OAuth endpoint logic and standardize on v2 API endpoint
## CHANGES

- Remove OAuth-specific v1 endpoint handling logic
- Standardize all API calls to use v2 endpoint
- Simplify baseURL configuration by removing conditional branching
- Update endpoint logic to always append v2 suffix
2025-07-05 09:37:57 -07:00
Kayvan Sylvan
eac5a104f2 feat: implement OAuth token refresh and persistent storage for Claude authentication
## CHANGES

- Add automatic OAuth token refresh when expired
- Implement persistent token storage using common OAuth storage
- Remove deprecated AuthToken setting from client configuration
- Add token validation with 5-minute expiration buffer
- Create refreshToken function for seamless token renewal
- Update OAuth flow to save complete token information
- Enhance error handling for OAuth authentication failures
- Simplify client configuration by removing manual token management
2025-07-05 09:17:50 -07:00
Kayvan Sylvan
4bff88fae3 feat: add OAuth authentication support for Anthropic Claude
- Move golang.org/x/oauth2 from indirect to direct dependency
- Add OAuth login option for Anthropic client
- Implement PKCE OAuth flow with browser integration
- Add custom HTTP transport for OAuth Bearer tokens
- Support both API key and OAuth authentication methods
- Add Claude Code system message for OAuth sessions
- Update REST API to handle OAuth tokens
- Improve environment variable name sanitization with regex
2025-07-05 08:32:16 -07:00
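
The custom HTTP transport for Bearer tokens mentioned above is a standard RoundTripper wrapper; a minimal sketch:

```go
import "net/http"

// oauthTransport injects the OAuth Bearer token into every outgoing request.
type oauthTransport struct {
	token string
	base  http.RoundTripper
}

func (t *oauthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
	clone := req.Clone(req.Context()) // never mutate the caller's request
	clone.Header.Set("Authorization", "Bearer "+t.token)
	base := t.base
	if base == nil {
		base = http.DefaultTransport
	}
	return base.RoundTrip(clone)
}
```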
github-actions[bot]
acf1be71ce Update version to v1.4.230 and commit 2025-07-05 07:08:05 +00:00
Kayvan Sylvan
236a3c5f38 Merge pull request #1575 from ksylvan/0704-advanced-image-output-options
Advanced image generation parameters for OpenAI models
2025-07-05 00:06:38 -07:00
Kayvan Sylvan
b2418984f8 feat: add advanced image generation parameters for OpenAI models
## CHANGES

- Add four new image generation CLI flags
- Implement validation for image parameter combinations
- Support size, quality, compression, and background controls
- Add comprehensive test coverage for new parameters
- Update shell completions for new image options
- Enhance README with detailed image generation examples
- Fix PowerShell code block formatting issues
2025-07-04 23:04:50 -07:00
github-actions[bot]
152d74d160 Update version to v1.4.229 and commit 2025-07-05 01:59:22 +00:00
Kayvan Sylvan
4e16bbccd8 Merge pull request #1574 from ksylvan/0704-image-tool-model-validation
Add Model Validation for Image Generation and Fix CLI Flag Mapping
2025-07-04 18:57:51 -07:00
Kayvan Sylvan
60174f41a4 refactor: extract supported models list to shared constant for image generation validation
## CHANGES

• Extract hardcoded model lists into shared constant
• Create ImageGenerationSupportedModels variable for reusability
• Update supportsImageGeneration function to use shared constant
• Refactor error messages to reference centralized model list
• Add documentation comment for supported models variable
• Import strings package in test file
• Consolidate duplicate model validation logic across files
2025-07-04 18:52:49 -07:00
Kayvan Sylvan
ad4683952e Merge branch 'main' into 0704-image-tool-model-validation 2025-07-04 18:45:08 -07:00
Kayvan Sylvan
86a044735b feat: add model validation for image generation support
### CHANGES

- Add model field to `BuildChatOptions` method
- Implement `supportsImageGeneration` function for model checks
- Validate model supports image generation in `sendResponses`
- Remove `mars-colony.png` from repository
- Add tests for `supportsImageGeneration` function
- Validate image file support in `TestModelValidationLogic`
2025-07-04 18:40:20 -07:00
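
Taken together, the two commits above reduce validation to membership in one shared slice. A sketch; the entries here are placeholders, not the real model list:

```go
import "slices"

// ImageGenerationSupportedModels is the shared constant named in the
// commits above; the values below are hypothetical.
var ImageGenerationSupportedModels = []string{"gpt-4o", "gpt-4.1"}

func supportsImageGeneration(model string) bool {
	return slices.Contains(ImageGenerationSupportedModels, model)
}
```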
github-actions[bot]
58583114cb Update version to v1.4.228 and commit 2025-07-05 01:13:09 +00:00
Kayvan Sylvan
36524cd2e4 Merge pull request #1573 from ksylvan/0704-image-dynamic-formats
Add Image File Validation and Dynamic Format Support
2025-07-04 18:11:36 -07:00
Kayvan Sylvan
e59156ac2b feat: add image file validation and format detection for image generation
## CHANGES

• Add image file path validation with extension checking
• Implement dynamic output format detection from file extensions
• Update BuildChatOptions method to return error for validation
• Add comprehensive test coverage for image file validation
• Upgrade YAML library from v2 to v3
• Update shell completions to reflect supported image formats
• Add error handling for existing file conflicts
• Support PNG, JPEG, JPG, and WEBP image formats
2025-07-04 17:56:59 -07:00
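
Dynamic format detection follows directly from the extension, covering the four formats the commit lists. A sketch with an illustrative function name:

```go
import (
	"fmt"
	"path/filepath"
	"strings"
)

func imageFormatFromPath(path string) (string, error) {
	switch strings.ToLower(filepath.Ext(path)) {
	case ".png":
		return "png", nil
	case ".jpg", ".jpeg":
		return "jpeg", nil
	case ".webp":
		return "webp", nil
	default:
		return "", fmt.Errorf("unsupported image extension %q: use png, jpg, jpeg, or webp", filepath.Ext(path))
	}
}
```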
Daniel Miessler
1eac026e92 Added tutorial as a tag. 2025-07-04 16:42:19 -07:00
github-actions[bot]
17d863fd57 Update version to v1.4.227 and commit 2025-07-04 22:40:01 +00:00
Daniel Miessler 🛡️
7c9dbfd343 Merge pull request #1572 from ksylvan/0704-OpenAI-Image-Generation-Tool
Add Image Generation Support to Fabric
2025-07-04 15:38:27 -07:00
Kayvan Sylvan
d9260bdf26 chore: refactor image generation constants for clarity and reuse
### CHANGES

- Define `ImageGenerationResponseType` constant for response handling
- Define `ImageGenerationToolType` constant for tool type usage
- Update `addImageGenerationTool` to use defined constants
- Refactor `extractAndSaveImages` to use response type constant
2025-07-04 15:14:46 -07:00
Kayvan Sylvan
63a0cfeb1e feat: add web search and image file support to fabric CLI
## CHANGES

- Add web search tool for Anthropic and OpenAI models
- Add search location parameter for web search results
- Add image file output option with format support
- Update zsh completion with new search and image flags
- Update bash completion with new option handling logic
- Update fish completion with search and image descriptions
- Support PNG, JPG, JPEG, GIF, BMP image formats
2025-07-04 14:49:40 -07:00
Kayvan Sylvan
12fc6e2000 feat: add image generation support with OpenAI image generation model
## CHANGES

- Add `--image-file` flag for saving generated images
- Implement image generation tool integration with OpenAI
- Support image editing with attachment input files
- Add comprehensive test coverage for image features
- Update documentation with image generation examples
- Fix HTML formatting issues in README
- Improve PowerShell code block indentation
- Clean up help text formatting and spacing
2025-07-04 14:36:55 -07:00
Daniel Miessler
fe5900a5dc Fixed ul tag applier. 2025-07-04 14:25:54 -07:00
Daniel Miessler
1b6b8e3d72 Updated ul tag prompt. 2025-07-04 14:21:25 -07:00
Daniel Miessler
c85301cb1f Added the UL tags pattern. 2025-07-04 14:17:43 -07:00
github-actions[bot]
7cc8226339 Update version to v1.4.226 and commit 2025-07-04 07:25:58 +00:00
Kayvan Sylvan
fc8c4babf8 Merge pull request #1569 from ksylvan/0703-openai-web-search
OpenAI Plugin Now Supports Web Search Functionality
2025-07-04 00:24:32 -07:00
Kayvan Sylvan
bd809a1f94 docs: update README with new web search feature details 2025-07-04 00:22:36 -07:00
Kayvan Sylvan
50aec6291b feat: add web search tool support for OpenAI models with citation formatting
## CHANGES

- Enable web search tool for OpenAI models
- Add location parameter support for search results
- Extract and format citations from search responses
- Implement citation deduplication to avoid duplicates
- Add comprehensive test coverage for search functionality
- Update CLI flag description to include OpenAI
- Format citations as markdown links with sources
2025-07-04 00:01:54 -07:00
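
Citation handling here is dedupe-then-format. A sketch under stated assumptions: the citation type is simplified, and the "## Sources" header stands in for whatever constant the code actually uses:

```go
import (
	"fmt"
	"strings"
)

type citation struct {
	Title string
	URL   string
}

// formatCitations deduplicates by URL and renders markdown links.
func formatCitations(citations []citation) string {
	var b strings.Builder
	seen := make(map[string]bool)
	b.WriteString("\n\n## Sources\n\n")
	for _, c := range citations {
		if seen[c.URL] {
			continue // drop duplicates
		}
		seen[c.URL] = true
		fmt.Fprintf(&b, "- [%s](%s)\n", c.Title, c.URL)
	}
	return b.String()
}
```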
github-actions[bot]
f927fdf40f Update version to v1.4.225 and commit 2025-07-04 06:58:02 +00:00
Kayvan Sylvan
918862ef57 Merge pull request #1568 from ksylvan/0703-enhanced-anthropic-search-tool
Runtime Web Search Control via Command-Line Flag
2025-07-03 23:56:34 -07:00
Kayvan Sylvan
d9b8bc3233 chore: refactor Send method to optimize string building
### CHANGES

- Add `sourcesHeader` constant for citation section title.
- Use `strings.Builder` to construct result efficiently.
- Append sources header and citations in result builder.
- Update `ret` to use constructed string from builder.
2025-07-03 23:52:12 -07:00
Kayvan Sylvan
da29b8e388 chore: remove unused web-search tool parameters for simplification
### CHANGES

- Remove unused `AllowedDomains` and `MaxUses` parameters
- Simplify `webTool` definition in `buildMessageParams` method
2025-07-03 23:41:22 -07:00
Kayvan Sylvan
5e6d4110fa refactor: extract web search tool constants in anthropic plugin
## CHANGES

- Add webSearchToolName constant for tool identification
- Add webSearchToolType constant for tool versioning
- Replace hardcoded string literals with named constants
- Improve code maintainability through constant extraction
2025-07-03 23:20:19 -07:00
Kayvan Sylvan
4bb090694b chore: update formatOptions to include search options display
### CHANGES

- Add search option status to `formatOptions`
- Include `SearchLocation` in formatted output if specified
2025-07-03 22:55:55 -07:00
Kayvan Sylvan
d232222787 feat: add web search tool support for Anthropic models
## CHANGES

- Add --search flag to enable web search
- Add --search-location for timezone-based results
- Pass search options through ChatOptions struct
- Implement web search tool in Anthropic client
- Format search citations with sources section
- Add comprehensive tests for search functionality
- Remove plugin-level web search configuration
2025-07-03 22:40:39 -07:00
github-actions[bot]
095890a556 Update version to v1.4.224 and commit 2025-07-01 21:44:24 +00:00
Kayvan Sylvan
64c1fe18ef Merge pull request #1564 from ksylvan/0701-code-review-pattern
Add code_review pattern and updates in Pattern_Descriptions
2025-07-01 14:42:50 -07:00
Kayvan Sylvan
1cea32a677 feat: handle JSONDecodeError in load_existing_file gracefully
### CHANGES

- Add JSONDecodeError handling with warning message.
- Initialize with empty list on JSON decode failure.
- Reorder pattern processing to reduce redundant logs.
- Remove redundant directory check logging.
- Ensure new pattern processing is logged correctly.
2025-07-01 14:36:35 -07:00
Kayvan Sylvan
49658a3214 feat: add new patterns for code review, alpha extraction, and server analysis
### CHANGES
- Add `review_code`, `extract_alpha`, and `extract_mcp_servers` patterns.
- Refactor the pattern extraction script for improved clarity.
- Add docstrings and specific error handling to script.
- Improve formatting in the pattern management README.
- Fix typo in the `analyze_bill_short` pattern description.
2025-07-01 14:05:41 -07:00
Kayvan Sylvan
f236cab276 feat: add comprehensive code review pattern for systematic analysis
## CHANGES

- Add new code review system prompt
- Define principal engineer reviewer role
- Include systematic analysis framework
- Specify markdown output format
- Add prioritized recommendations section
- Include detailed feedback structure
- Provide example Python review
- Cover security, performance, readability
- Add error handling guidelines
2025-07-01 13:43:04 -07:00
github-actions[bot]
5e0aaa1f93 Update version to v1.4.223 and commit 2025-07-01 14:52:18 +00:00
Kayvan Sylvan
eb16806931 Merge pull request #1563 from ksylvan/0701-fix-windows-build
Fix Cross-Platform Compatibility in Release Workflow
2025-07-01 07:50:46 -07:00
Kayvan Sylvan
474dd786a4 chore: update GitHub Actions to use bash shell in release job
### CHANGES

- Adjust repository_dispatch type spacing for consistency
- Use bash shell for creating release if absent
2025-07-01 07:45:05 -07:00
github-actions[bot]
edad63df19 Update version to v1.4.222 and commit 2025-07-01 14:17:23 +00:00
Kayvan Sylvan
c7eb7439ef Merge pull request #1559 from ksylvan/0629-openai-responses-api
OpenAI Plugin Migrates to New Responses API
2025-07-01 07:15:43 -07:00
Daniel Miessler
23d678d62f Updated alpha post. 2025-06-30 06:50:47 -07:00
Kayvan Sylvan
de5260a661 feat(openai): add support for multi-content user messages in chat completions
### CHANGES

- Enhance user message conversion to support multi-content.
- Add capability to process image URLs in messages.
- Build multi-part messages with both text and images.
2025-06-30 00:21:42 -07:00
Kayvan Sylvan
baeadc2270 chore: update NewClient to use NewClientCompatibleWithResponses
### CHANGES

- Modify `NewClient` to call `NewClientCompatibleWithResponses`
- Add support for response handling in client initialization
2025-06-30 00:13:15 -07:00
Kayvan Sylvan
5b4cec81c3 feat: simplify supportsResponsesAPI 2025-06-29 23:57:57 -07:00
Kayvan Sylvan
eda5531087 refactor: extract common message conversion logic to reduce duplication
## CHANGES

- Extract shared message conversion to convertMessageCommon
- Reuse logic between chat and response APIs
- Maintain existing text-only behavior for chat
- Support multi-content messages in response API
- Reduce code duplication across converters
- Preserve backward compatibility for both APIs
2025-06-29 23:48:14 -07:00
Kayvan Sylvan
66925d188a fix: move channel close to defer statement in OpenAI streaming methods
## CHANGES

- Move close(channel) to defer statement
- Ensure channel closes even on errors
- Apply fix to sendStreamChatCompletions method
- Apply fix to sendStreamResponses method
- Improve error handling reliability
- Prevent potential channel leaks
2025-06-29 23:27:24 -07:00
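
The shape of the fix, with illustrative names: moving the close into a defer means even the error return can no longer leave the channel open and block the reader:

```go
import "fmt"

func streamEvents(events []string, out chan<- string, fail bool) error {
	defer close(out) // runs on every exit path, success or error
	for _, ev := range events {
		out <- ev
	}
	if fail {
		return fmt.Errorf("stream aborted") // channel still closed via defer
	}
	return nil
}
```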
Kayvan Sylvan
6179742e79 feat: add chat completions API support for OpenAI-compatible providers
## CHANGES

* Add chat completions API fallback for non-Responses API providers
* Implement `sendChatCompletions` and `sendStreamChatCompletions` methods
* Introduce `buildChatCompletionParams` to construct API request parameters
* Add `ImplementsResponses` flag to track provider API capabilities
* Update provider configurations with Responses API support status
* Enhance `Send` and `SendStream` methods to use appropriate API endpoints
2025-06-29 22:52:55 -07:00
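
The dispatch described above hinges on the per-provider ImplementsResponses flag. A sketch with stub method bodies:

```go
type providerClient struct {
	ImplementsResponses bool
}

func (c *providerClient) Send(msgs []string) (string, error) {
	if c.ImplementsResponses {
		return c.sendResponses(msgs)
	}
	return c.sendChatCompletions(msgs) // fallback for classic providers
}

func (c *providerClient) sendResponses(msgs []string) (string, error)       { return "", nil }
func (c *providerClient) sendChatCompletions(msgs []string) (string, error) { return "", nil }
```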
Kayvan Sylvan
d8fc6940f0 feat: migrate OpenAI plugin to use new responses API instead of chat completions
- Replace chat completions with responses API
- Update message conversion to new format
- Refactor streaming to handle event types
- Remove frequency and presence penalty params
- Replace seed parameter with max tokens
- Update test cases for new API
- Add response text extraction method
2025-06-29 21:06:12 -07:00
Daniel Miessler
44f7e8dfef Updated extract alpha. 2025-06-28 15:18:53 -07:00
Daniel Miessler
c5ada714ff Updated extract alpha. 2025-06-28 15:17:10 -07:00
Daniel Miessler
80c4807f7e Added extract_alpha as kind of an experiment. 2025-06-28 15:14:14 -07:00
github-actions[bot]
b4126b6798 Update version to v1.4.221 and commit 2025-06-28 15:03:37 +00:00
Daniel Miessler 🛡️
f2ffa64af9 Merge pull request #1556 from ksylvan/0628-migrate-to-official-openai-go 2025-06-28 08:02:08 -07:00
Kayvan Sylvan
09e01eddf4 refactor: abstract chat message structs and migrate to official openai-go SDK
### CHANGES

- Introduce local `chat` package for message abstraction
- Replace sashabaranov/go-openai with official openai-go SDK
- Update OpenAI, Azure, and Exolab plugins for new client
- Refactor all AI providers to use internal chat types
- Decouple codebase from third-party AI provider structs
- Replace deprecated `ioutil` functions with `os` equivalents
2025-06-28 07:28:49 -07:00
github-actions[bot]
aa028a4a57 Update version to v1.4.220 and commit 2025-06-28 05:28:02 +00:00
Kayvan Sylvan
d8d157404c Merge pull request #1555 from ksylvan/0627-github-actions-release-fix
fix: Race condition in GitHub actions release flow
2025-06-27 22:26:27 -07:00
Kayvan Sylvan
d0602c9653 chore: improve release creation to gracefully handle pre-existing tags.
### CHANGES

*   Check if a release exists before attempting creation.
*   Suppress error output from `gh release view` command.
*   Add an informative log when release already exists.
2025-06-27 21:05:14 -07:00
github-actions[bot]
35155496a4 Update version to v1.4.219 and commit 2025-06-28 02:59:32 +00:00
Kayvan Sylvan
eef16b89f2 Merge pull request #1553 from ksylvan/0627-deepwiki-badge-added
docs: add DeepWiki badge and fix minor typos in README
2025-06-27 19:58:00 -07:00
Kayvan Sylvan
7f66097577 docs: add DeepWiki badge and fix minor typos in README
## CHANGES

- Add DeepWiki badge to README header
- Fix typo "chatbots" to "chat-bots"
- Correct "Perlexity" to "Perplexity"
- Fix "distro" to "Linux distribution"
- Add alt text to contributor images
- Update dependency versions in go.mod
- Remove unused soup dependency
2025-06-27 19:34:01 -07:00
Kayvan Sylvan
2012f22a9c Merge pull request #1552 from nawarajshahi/main
Fix typos in README.md
2025-06-27 12:18:47 -07:00
Nawaraj Shahi
08695c9e24 Fix typos on README.md 2025-06-27 10:14:48 -04:00
github-actions[bot]
d8cc9b5eef Update version to v1.4.218 and commit 2025-06-27 00:22:16 +00:00
Kayvan Sylvan
9dbe20cf7b Merge pull request #1550 from ksylvan/0626-more-openai-raw-mode
Add Support for OpenAI Search and Research Model Variants
2025-06-26 17:20:47 -07:00
Kayvan Sylvan
64763e1303 feat: add support for new OpenAI search and research model variants
## CHANGES

- Add slices import for array operations
- Define new search preview model names
- Add mini search preview variants
- Include deep research model support
- Add June 2025 dated model versions
- Replace hardcoded check with slices.Contains
- Support both prefix and exact model matching
2025-06-26 17:06:25 -07:00
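
Supporting "both prefix and exact model matching" is a two-stage check, as sketched below; both lists are illustrative stand-ins for the constants in the commit:

```go
import (
	"slices"
	"strings"
)

var exactSearchModels = []string{
	"gpt-4o-search-preview",
	"gpt-4o-mini-search-preview",
}

var searchModelPrefixes = []string{"o3-deep-research"}

func isSearchOrResearchModel(model string) bool {
	if slices.Contains(exactSearchModels, model) {
		return true // exact match, e.g. an undated variant
	}
	for _, p := range searchModelPrefixes {
		if strings.HasPrefix(model, p) { // catches dated June 2025 versions
			return true
		}
	}
	return false
}
```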
github-actions[bot]
126a9ff406 Update version to v1.4.217 and commit 2025-06-26 23:09:56 +00:00
Kayvan Sylvan
e906425138 Merge pull request #1546 from ksylvan/0626-fix-yt-in-web-interface
New YouTube Transcript Endpoint Added to REST API
2025-06-26 16:08:23 -07:00
Daniel Miessler
df4a560302 Add extract_mcp_servers pattern
New pattern to extract mentions of MCP (Model Context Protocol) servers from content. Identifies server names, features, capabilities, and usage examples.

🤖 Generated with [Claude Code](https://claude.ai/code)

Co-Authored-By: Claude <noreply@anthropic.com>
2025-06-26 11:39:21 -07:00
Kayvan Sylvan
34cf669bd4 chore: fix endpoint calls from frontend 2025-06-26 01:37:53 -07:00
Kayvan Sylvan
0dbe1bbb4e feat: add dedicated YouTube transcript API endpoint
## CHANGES

- Add new YouTube handler for transcript requests
- Create `/youtube/transcript` POST endpoint route
- Add request/response types for YouTube API
- Support language and timestamp options
- Update frontend to use new endpoint
- Remove chat endpoint dependency for transcripts
- Validate video vs playlist URLs properly
2025-06-26 01:21:27 -07:00
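
A sketch of the endpoint wiring, assuming a gin-style router (an assumption about the server stack); the JSON field names are likewise assumptions based on the options the commit lists:

```go
import (
	"net/http"

	"github.com/gin-gonic/gin"
)

type youtubeRequest struct {
	URL        string `json:"url"`
	Language   string `json:"language"`
	Timestamps bool   `json:"timestamps"`
}

type youtubeResponse struct {
	Transcript string `json:"transcript"`
}

func registerYouTubeRoutes(r *gin.Engine, fetch func(url, lang string, ts bool) (string, error)) {
	r.POST("/youtube/transcript", func(c *gin.Context) {
		var req youtubeRequest
		if err := c.ShouldBindJSON(&req); err != nil {
			c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
			return
		}
		transcript, err := fetch(req.URL, req.Language, req.Timestamps)
		if err != nil {
			c.JSON(http.StatusInternalServerError, gin.H{"error": err.Error()})
			return
		}
		c.JSON(http.StatusOK, youtubeResponse{Transcript: transcript})
	})
}
```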
github-actions[bot]
e29ed908e6 Update version to v1.4.216 and commit 2025-06-26 06:52:16 +00:00
Kayvan Sylvan
3d049a435a Merge pull request #1545 from ksylvan/0625-fix-attachments-used-with-patterns
Update Message Handling for Attachments and Multi-Modal content
2025-06-25 23:50:43 -07:00
Kayvan Sylvan
1a335b3fb9 refactor(ai): unify assistant and user message formatting in dryrun
### CHANGES

- Unify assistant and user message formatting logic.
- Use `formatMultiContentMessage` for assistant role messages.
- Improve dryrun support for multi-part message content.
2025-06-25 23:49:23 -07:00
Kayvan Sylvan
e2430b6c75 fix: correctly combine text and attachments in raw mode sessions
### CHANGES

- Combine user text and attachments into MultiContent.
- Preserve existing non-text parts like images.
- Use standard content field for text-only messages.
2025-06-25 23:28:12 -07:00
Kayvan Sylvan
2497f10eca feat: add MultiContent support to chat message construction in raw mode 2025-06-25 23:18:56 -07:00
Kayvan Sylvan
f62d2198f9 refactor: extract message and option formatting logic into reusable methods
## CHANGES

- Extract multi-content message formatting to dedicated method
- Create formatMessages method for all message types
- Add formatOptions method for chat options display
- Replace inline formatting with strings.Builder usage
- Reduce code duplication between Send and SendStream
- Improve code organization and maintainability
2025-06-25 22:08:26 -07:00
Kayvan Sylvan
816e4072f4 fix(chatter): prevent duplicate user message when applying patterns
### CHANGES

*   Prevent adding user message twice when using patterns.
*   Ensure multi-part content is always included in session.
2025-06-25 21:43:46 -07:00
Kayvan Sylvan
85ee6196bd chore: fix formatting. 2025-06-25 18:31:46 -07:00
Kayvan Sylvan
e15645c1bc chore: clean up comments in chatter.go for clarity 2025-06-25 17:15:13 -07:00
Kayvan Sylvan
fada6bb044 chore: simplify user message appending logic in BuildSession
### CHANGES
- Remove conditional check for pattern name in message appending.
- Always append user message if it exists in request.
2025-06-25 17:12:48 -07:00
Kayvan Sylvan
4ad14bb752 feat: enhance dryrun client to display multi-content user messages
### CHANGES

- Handle multi-content messages for the user role.
- Display image URLs from user messages in output.
- Update both `Send` and `SendStream` methods.
- Retain existing behavior for simple text messages.
2025-06-25 17:08:30 -07:00
Kayvan Sylvan
97fc9b0d58 feat: allow combining user messages and attachments with patterns
- Allow user messages and attachments with patterns.
- Append user message to session regardless of pattern.
- Refactor chat request builder for improved clarity.
2025-06-25 16:24:47 -07:00
github-actions[bot]
ad0df37d10 Update version to v1.4.215 and commit 2025-06-25 11:07:45 +00:00
Kayvan Sylvan
666302c3c1 Merge pull request #1543 from ksylvan/0625-fix-pattern-descriptions-json
fix: Revert multiline tags in generated json files
2025-06-25 04:06:12 -07:00
Kayvan Sylvan
71e20cf251 chore: reformat pattern_descriptions.json to improve readability
### CHANGES

- Reformat JSON `tags` array to display on new lines.
- Update `write_essay` pattern description for clarity.
- Apply consistent formatting to both data files.
2025-06-25 03:55:00 -07:00
github-actions[bot]
b591666366 Update version to v1.4.214 and commit 2025-06-25 09:51:32 +00:00
Daniel Miessler 🛡️
155d9f0a76 Merge pull request #1542 from ksylvan/0624-write-essay-by-author-and-updates 2025-06-25 02:49:54 -07:00
Kayvan Sylvan
6a7cca65b4 chore: Fixes caught by review 2025-06-24 23:09:14 -07:00
Kayvan Sylvan
94020dbde0 chore: rename essay patterns to clarify Paul Graham style and author variable usage
## CHANGES

- Rename `write_essay` to `write_essay_pg` for Paul Graham style
- Rename `write_essay_by_author` to `write_essay` with author variable
- Update pattern descriptions to reflect naming changes
- Fix duplicate `write_essay_pg` entry in pattern descriptions
2025-06-24 21:54:39 -07:00
Kayvan Sylvan
f949391098 feat: add new pattern and update pattern metadata files.
### CHANGES

- Add tags and descriptions for five new creative and analytical patterns.
- Introduce `analyze_terraform_plan` for infrastructure review.
- Add `write_essay_by_author` for stylistic writing.
- Include `summarize_board_meeting` for corporate notes.
- Introduce `create_mnemonic_phrases` for memory aids.
- Update and clean pattern description data files.
- Sort the pattern explanations list alphabetically.
2025-06-24 12:42:39 -07:00
Kayvan Sylvan
64c3c69a70 Merge branch 'danielmiessler:main' into main 2025-06-23 13:03:07 -07:00
github-actions[bot]
4a830394be Update version to v1.4.213 and commit 2025-06-23 20:01:04 +00:00
Kayvan Sylvan
9f8a2d3b59 Merge pull request #1538 from andrewsjg/bug/bedrock-region-handling
Bug/bedrock region handling
2025-06-23 12:59:30 -07:00
github-actions[bot]
4353bc9f7f Update version to v1.4.212 and commit 2025-06-23 19:57:58 +00:00
Kayvan Sylvan
7a8024ee79 Merge pull request #1540 from ksylvan/0623-langdock-ai
Add Langdock AI and enhance generic OpenAI compatible support
2025-06-23 12:56:25 -07:00
Kayvan Sylvan
b5bf75ad2e chore: refactor ProviderMap for dynamic URL template handling
# CHANGES

- Add `os` and `strings` packages to imports
- Implement dynamic URL handling with environment variables
- Refactor provider configuration to support URL templates
- Reorder providers for consistent key order in ProviderMap
- Extract and parse template variables from BaseURL
- Use environment variables or default values for templates
- Replace template with actual values in BaseURL
2025-06-23 12:38:52 -07:00
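
The "env var or default" substitution could look like the sketch below. The {{NAME:default}} placeholder syntax is a hypothetical form; the real template format in the provider map may differ:

```go
import (
	"os"
	"regexp"
)

var templateVar = regexp.MustCompile(`\{\{(\w+):([^}]*)\}\}`)

func expandBaseURL(raw string) string {
	return templateVar.ReplaceAllStringFunc(raw, func(m string) string {
		parts := templateVar.FindStringSubmatch(m)
		if v := os.Getenv(parts[1]); v != "" {
			return v // environment variable wins
		}
		return parts[2] // otherwise use the default baked into the template
	})
}
```

Under these assumptions, expandBaseURL("https://api.{{REGION:eu}}.example/v1") would take REGION from the environment and fall back to "eu".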
Kayvan Sylvan
1ae847f397 chore: refactor ProviderMap for dynamic URL template handling
# CHANGES

- Add `os` and `strings` packages to imports
- Implement dynamic URL handling with environment variables
- Refactor provider configuration to support URL templates
- Reorder providers for consistent key order in ProviderMap
- Extract and parse template variables from BaseURL
- Use environment variables or default values for templates
- Replace template with actual values in BaseURL
2025-06-23 12:35:59 -07:00
Kayvan Sylvan
3fd923f6b8 chore: refactor Bedrock client to improve error handling and add interface compliance
## CHANGES

- Add ai.Vendor interface implementation check
- Improve error handling with wrapped errors
- Add AWS region validation logic
- Fix resource cleanup in SendStream
- Add nil checks for response parsing
- Update context usage to Background()
- Add user agent constants
- Enhance code documentation
2025-06-23 09:13:11 -07:00
James Andrews
eb251139b8 bedrock region handling - updated to set region value correctly if it exists in the config 2025-06-23 00:12:58 +01:00
James Andrews
0b5d3cfc30 bedrock region handling - updated to fix bad pointer reference 2025-06-23 00:03:32 +01:00
James Andrews
14a3c11930 Fixed bedrock region handling 2025-06-22 23:22:45 +01:00
James Andrews
c8cf6da0cc Updated hasAWSCredentials to also check for AWS_DEFAULT_REGION when access keys are configured in the environment 2025-06-22 14:27:04 +01:00
Daniel Miessler
a2c954ba50 Updated paper analyzer. 2025-06-19 14:48:05 -07:00
github-actions[bot]
730d0adc86 Update version to v1.4.211 and commit 2025-06-19 21:47:20 +00:00
Kayvan Sylvan
dc9168ab6f Merge pull request #1533 from ksylvan/0619-enhance-restapi-and-webui-with-variables
REST API and Web UI Now Support Dynamic Pattern Variables
2025-06-19 14:45:48 -07:00
Daniel Miessler
e500a5916e Updated paper analyzer. Went back to my own format. 2025-06-19 14:45:31 -07:00
Kayvan Sylvan
6ddf46a379 chore: removed a directory of raycast scripts sitting in the patterns/ directory 2025-06-19 14:11:29 -07:00
Kayvan Sylvan
e8aa358b15 refactor(ChatService): clean up message stream and pattern output methods
- Refactor `cleanPatternOutput` to use a dedicated return variable.
- Hoist `processResponse` function for improved stream readability.
- Remove unnecessary whitespace and trailing newlines from file.
2025-06-19 13:55:25 -07:00
Daniel Miessler
62f373c2b4 Updated paper analyzer. 2025-06-19 13:55:03 -07:00
Daniel Miessler
fcf826f3de Updated paper analyzer. 2025-06-19 13:48:57 -07:00
Kayvan Sylvan
bd2db29cee feat: add ApplyPattern route for applying patterns with variables
## CHANGES
- Create `PatternApplyRequest` struct for request body parsing
- Implement `ApplyPattern` method for POST /patterns/:name/apply
- Register manual routes for pattern operations in `NewPatternsHandler`
- Refactor `Get` method to return raw pattern content
- Merge query parameters with request body variables in `ApplyPattern`
- Use `StorageHandler` for pattern-related storage operations
2025-06-19 13:30:56 -07:00
Kayvan Sylvan
c6d612ee9a feat: add pattern variables support to REST API chat endpoint
## CHANGES

- Add Variables field to PromptRequest struct
- Pass pattern variables through chat handler
- Create API variables documentation example
- Add pattern variables UI in web interface
- Create pattern variables store in Svelte
- Include variables in chat service requests
- Add JSON textarea for variable input
2025-06-19 13:10:05 -07:00
Daniel Miessler
d613c25974 Updated sanitization instructions. 2025-06-19 12:24:09 -07:00
Daniel Miessler
c0abea7c66 Updated markdown cleaner. 2025-06-19 12:02:21 -07:00
Daniel Miessler
496bd2812a Updated markdown cleaner. 2025-06-19 11:34:09 -07:00
github-actions[bot]
70fccaf2fb Update version to v1.4.210 and commit 2025-06-18 07:40:11 +00:00
Kayvan Sylvan
9a71f7c96d Merge pull request #1530 from ksylvan/0617-add-citations-to-perplexity
Add Citation Support to Perplexity Response
2025-06-18 00:38:37 -07:00
Kayvan Sylvan
5da3db383d feat: add citation support to perplexity AI responses
## CHANGES

- Add citation extraction from API responses
- Append citations section to response content
- Format citations as numbered markdown list
- Handle citations in streaming responses
- Store last response for citation access
- Add citations after stream completion
- Maintain backward compatibility with responses
2025-06-17 20:45:03 -07:00
Daniel Miessler 🛡️
19438cbd20 Update README.md 2025-06-17 11:52:02 -07:00
Daniel Miessler 🛡️
a0b71ee365 Update README.md
Updated readme.
2025-06-17 11:48:44 -07:00
Daniel Miessler 🛡️
034513ece5 Update README.md
An update to the intro text, describing Fabric's utility to most people.
2025-06-17 11:45:46 -07:00
github-actions[bot]
0affb9bab1 Update version to v1.4.209 and commit 2025-06-17 10:21:02 +00:00
github-actions[bot]
3305df8fb2 Update version to v1.4.208 and commit 2025-06-17 10:19:28 +00:00
Kayvan Sylvan
892c229076 Merge pull request #1527 from ksylvan/0617-add-perplexity-vendor
Add Perplexity AI Provider with Token Limits Support
2025-06-17 03:17:57 -07:00
Kayvan Sylvan
599c5f2b9f Merge pull request #1526 from ConnorKirk/check-for-aws-credentials
Check for AWS_PROFILE or AWS_ROLE_SESSION_NAME environment variables
2025-06-17 03:17:48 -07:00
Kayvan Sylvan
19e5d8dbe0 chore: update README with Perplexity AI support instructions
### CHANGES
- Add instructions for configuring Perplexity AI with Fabric
- Include example command for querying Perplexity AI
- Retain existing instructions for YouTube transcription changes
2025-06-17 02:57:37 -07:00
Kayvan Sylvan
b772127738 feat: add Perplexity AI provider support with token limits and streaming
## CHANGES

- feat: Add `MaxTokens` field to `ChatOptions` struct for response control
- feat: Integrate Perplexity client into core plugin registry initialization
- build: Add perplexity-go/v2 dependency to enable API interactions
- feat: Implement stream handling in Perplexity client using sync.WaitGroup
- fix: Correct parameter types for penalty options in API requests
## LINKS

<https://github.com/sgaunet/perplexity-go> - Client library used
2025-06-17 02:32:53 -07:00
Connor Kirkpatrick
5dd61abe2a Check for AWS_PROFILE or AWS_ROLE_SESSION_NAME environment variables 2025-06-17 10:25:17 +01:00
github-actions[bot]
f45e140126 Update version to v1.4.207 and commit 2025-06-17 07:41:51 +00:00
Kayvan Sylvan
752a66cb48 Merge pull request #1525 from ksylvan/0617-fix-lang-code-vtt-youtube-transcript-bug
Refactor yt-dlp Transcript Logic and Fix Language Bug
2025-06-17 00:40:18 -07:00
Kayvan Sylvan
da28d91d65 refactor: extract common yt-dlp logic to reduce code duplication in YouTube plugin
## CHANGES

- Extract shared yt-dlp logic into tryMethodYtDlpInternal helper
- Add processVTTFileFunc parameter for flexible VTT processing
- Implement language matching for 2-char language codes
- Refactor tryMethodYtDlp to use new helper function
- Refactor tryMethodYtDlpWithTimestamps to use helper
- Reduce code duplication between transcript methods
- Maintain existing functionality with cleaner structure
2025-06-17 00:32:33 -07:00
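
The parameterization the commit describes passes the VTT-processing step in as a function value, so the two public methods become thin wrappers. A sketch with a hypothetical stub for the shared yt-dlp invocation:

```go
// processVTTFileFunc lets each caller supply its own VTT handling
// (plain text vs. timestamp-preserving).
type processVTTFileFunc func(path string) (string, error)

// downloadSubtitles is a hypothetical stub for the shared yt-dlp call
// and two-character language-code matching.
func downloadSubtitles(videoURL, lang string) (string, error) {
	return "", nil
}

func tryMethodYtDlpInternal(videoURL, lang string, process processVTTFileFunc) (string, error) {
	vttPath, err := downloadSubtitles(videoURL, lang)
	if err != nil {
		return "", err
	}
	return process(vttPath) // behavior differs only in this step
}
```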
Daniel Miessler
5a66ca1c5a Updated extract insights. 2025-06-16 16:43:21 -07:00
Daniel Miessler
98f3da610b Updated extract insights. 2025-06-16 16:41:14 -07:00
github-actions[bot]
73ce92ccd9 Update version to v1.4.206 and commit 2025-06-16 23:12:53 +00:00
Kayvan Sylvan
7f3f1d641f Merge pull request #1523 from ksylvan/0616-bedrock-plugin-config-fix
Conditional AWS Bedrock Plugin Initialization
2025-06-16 16:10:59 -07:00
Kayvan Sylvan
44b5c46beb feat: add AWS credential detection for Bedrock client initialization
## CHANGES

- Add hasAWSCredentials helper function
- Check for AWS_ACCESS_KEY_ID and AWS_SECRET_ACCESS_KEY
- Look for AWS shared credentials file
- Support custom AWS_SHARED_CREDENTIALS_FILE path
- Default to ~/.aws/credentials location
- Only initialize Bedrock client if credentials exist
- Prevent AWS SDK credential search failures
2025-06-16 15:11:58 -07:00
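
The detection the bullets describe maps directly onto environment and filesystem checks. A minimal sketch:

```go
import (
	"os"
	"path/filepath"
)

// hasAWSCredentials gates Bedrock initialization: static keys in the
// environment, or a shared credentials file (custom path or default).
func hasAWSCredentials() bool {
	if os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "" {
		return true
	}
	credFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
	if credFile == "" {
		home, err := os.UserHomeDir()
		if err != nil {
			return false
		}
		credFile = filepath.Join(home, ".aws", "credentials")
	}
	_, err := os.Stat(credFile)
	return err == nil
}
```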
Daniel Miessler
8d37c9d6b9 Updated prompt. 2025-06-16 13:26:13 -07:00
github-actions[bot]
1138d0b60e Update version to v1.4.205 and commit 2025-06-16 13:26:26 +00:00
Kayvan Sylvan
b78217088d Merge pull request #1519 from ConnorKirk/bedrock-plugin-dynamically-fetch-models 2025-06-16 06:24:54 -07:00
Connor Kirkpatrick
76b889733d Dynamically fetch and list available foundation models and inference profiles 2025-06-16 11:05:34 +01:00
Kayvan Sylvan
3911fd9f5d Merge pull request #1518 from ksylvan/0615-remove-old-redundant-patterns
chore: remove duplicate/outdated patterns
2025-06-15 12:56:31 -07:00
Daniel Miessler
b06e29f8a8 Updated markdown sanitizer. 2025-06-15 12:52:39 -07:00
Kayvan Sylvan
11a7e542e1 chore: remove duplicate/outdated patterns 2025-06-15 12:47:08 -07:00
Daniel Miessler
6681078259 Updated markdown cleaner. 2025-06-15 12:45:34 -07:00
Daniel Miessler
be1edf7b1d Updated markdown cleaner. 2025-06-15 12:44:15 -07:00
github-actions[bot]
8ce748a1b1 Update version to v1.4.204 and commit 2025-06-15 05:53:11 +00:00
Kayvan Sylvan
96070f6f39 Merge pull request #1517 from ksylvan/0614-prevent-race-conditions-tag-and-release
Fix: Prevent race conditions in versioning workflow.
2025-06-14 22:51:39 -07:00
Kayvan Sylvan
ca3e89a889 ci: improve version update workflow to prevent race conditions
### CHANGES

- Add concurrency control to prevent simultaneous runs
- Pull latest main branch changes before tagging
- Fetch all remote tags before calculating version
2025-06-14 22:30:54 -07:00
github-actions[bot]
47d799d7ae Update version to v1.4.203 and commit 2025-06-14 06:01:13 +00:00
Eugen Eisler
4899ce56a5 Merge pull request #1512 from ConnorKirk/1500-add-support-for-amazon-bedrock
feat:Add support for Amazon Bedrock
2025-06-14 07:59:41 +02:00
Eugen Eisler
4a7b7becec Merge pull request #1513 from marcas756/feature/create_mnemonic_phrases
feat: create mnemonic phrase pattern
2025-06-14 07:53:05 +02:00
Eugen Eisler
80fdccbe89 Merge pull request #1516 from ksylvan/0612-fix-REST-api-put-pattern
Fix REST API pattern creation
2025-06-14 07:52:06 +02:00
Kayvan Sylvan
d9d8f7bf96 feat: add Save method to PatternsEntity for persisting patterns to filesystem
## CHANGES

- Add Save method to PatternsEntity struct
- Create pattern directory with proper permissions
- Write pattern content to system pattern file
- Add comprehensive test for Save functionality
- Verify directory creation and file contents
- Handle errors for directory and file operations
2025-06-13 15:52:01 -07:00
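
A sketch of the Save method described above: create the pattern's directory, then write the content. The struct field and the system.md filename are assumptions based on Fabric's pattern layout:

```go
import (
	"os"
	"path/filepath"
)

type PatternsEntity struct {
	Dir string // root patterns directory
}

func (e *PatternsEntity) Save(name string, content []byte) error {
	dir := filepath.Join(e.Dir, name)
	if err := os.MkdirAll(dir, 0o755); err != nil {
		return err
	}
	return os.WriteFile(filepath.Join(dir, "system.md"), content, 0o644)
}
```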
Marco Bacchi
a96ddbeef0 feat: create mnemonic phrase pattern
Add a new pattern for generating mnemonic phrases from diceware words. This includes two markdown files defining the user guide, and system implementation details.
2025-06-12 23:27:08 +02:00
Connor Kirkpatrick
d32a1d6a5a Add Bedrock plugin
This commits adds support for using Amazon Bedrock within fabric.
2025-06-12 13:07:12 +01:00
github-actions[bot]
201474791d Update version to v1.4.202 and commit 2025-06-12 05:47:10 +00:00
Eugen Eisler
6d09137fee Merge pull request #1510 from ksylvan/0611-fix-youtube-transcript-for-windows
Cross-Platform fix for Youtube Transcript extraction
2025-06-12 07:45:38 +02:00
Kayvan Sylvan
680febbe66 fix: replace Unix-specific file operations with cross-platform alternatives
## CHANGES

- Replace hardcoded `/tmp` with `os.TempDir()` for paths
- Use `filepath.Join()` instead of string concatenation
- Remove Unix `find` command dependency completely
- Add new `findVTTFiles()` method using `filepath.Walk()`
- Make VTT file discovery work on Windows
- Improve error handling for file operations
- Maintain backward compatibility with existing functionality
2025-06-11 22:24:48 -07:00
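
Replacing the Unix `find` dependency with filepath.Walk makes the VTT lookup portable; paired with os.TempDir() and filepath.Join as the commit notes. A sketch:

```go
import (
	"os"
	"path/filepath"
	"strings"
)

// findVTTFiles walks dir and collects .vtt subtitle files, so the lookup
// also works on Windows.
func findVTTFiles(dir string) ([]string, error) {
	var files []string
	err := filepath.Walk(dir, func(path string, info os.FileInfo, err error) error {
		if err != nil {
			return err
		}
		if !info.IsDir() && strings.EqualFold(filepath.Ext(path), ".vtt") {
			files = append(files, path)
		}
		return nil
	})
	return files, err
}
```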
github-actions[bot]
f59e5081f3 Update version to v1.4.201 and commit 2025-06-12 02:35:09 +00:00
Eugen Eisler
6a504c7422 Merge pull request #1503 from danielmiessler/dependabot/npm_and_yarn/web/npm_and_yarn-6ea9762674
chore(deps): bump brace-expansion from 1.1.11 to 1.1.12 in /web in the npm_and_yarn group across 1 directory
2025-06-12 04:33:36 +02:00
Eugen Eisler
89a0abcbe4 Merge pull request #1508 from ksylvan/0611-youtube-followup-fixes
feat: cleanup after `yt-dlp` addition
2025-06-12 04:32:30 +02:00
Kayvan Sylvan
2dfd78ef0b feat: cleanup after yt-dlp addition
### CHANGES
- Update README with yt-dlp requirement for transcripts
- Ensure the errors are clear and actionable.
2025-06-11 17:27:11 -07:00
github-actions[bot]
2200b6ea08 Update version to v1.4.200 and commit 2025-06-11 21:45:09 +00:00
Eugen Eisler
82f9ebaf99 Merge pull request #1507 from ksylvan/0611-youtube-fix
Refactor: No more web scraping, just use yt-dlp
2025-06-11 23:43:33 +02:00
Kayvan Sylvan
704ad3067a refactor: replace web scraping with yt-dlp for YouTube transcript extraction
## CHANGES

- Remove unreliable YouTube API scraping methods
- Add yt-dlp integration for transcript extraction
- Implement VTT subtitle parsing functionality
- Add timestamp preservation for transcripts
- Remove soup HTML parsing dependency
- Add error handling for missing yt-dlp
- Create temporary directory management
- Support multiple subtitle format fallbacks
2025-06-11 14:24:40 -07:00
github-actions[bot]
6f7e3c04d7 Update version to v1.4.199 and commit 2025-06-11 20:27:06 +00:00
Eugen Eisler
79f763456e Merge pull request #1506 from danielmiessler/feat/antropic_tool
fix: fix web search tool location
2025-06-11 22:25:22 +02:00
Eugen Eisler
9d4f7f1571 fix: fix web search tool location 2025-06-11 22:19:21 +02:00
github-actions[bot]
8e7373b308 Update version to v1.4.198 and commit 2025-06-11 18:51:13 +00:00
Eugen Eisler
7a39742507 Merge pull request #1504 from marcas756/fix/ollama-hardcoded-timeout
fix: Add configurable HTTP timeout for Ollama client
2025-06-11 20:49:41 +02:00
github-actions[bot]
cea218e61e Update version to v1.4.197 and commit 2025-06-11 18:41:32 +00:00
dependabot[bot]
02ac68834d chore(deps): bump brace-expansion
Bumps the npm_and_yarn group with 1 update in the /web directory: [brace-expansion](https://github.com/juliangruber/brace-expansion).


Updates `brace-expansion` from 1.1.11 to 1.1.12
- [Release notes](https://github.com/juliangruber/brace-expansion/releases)
- [Commits](https://github.com/juliangruber/brace-expansion/compare/1.1.11...v1.1.12)

---
updated-dependencies:
- dependency-name: brace-expansion
  dependency-version: 1.1.12
  dependency-type: indirect
  dependency-group: npm_and_yarn
...

Signed-off-by: dependabot[bot] <support@github.com>
2025-06-11 18:41:27 +00:00
Eugen Eisler
f673f424da Merge pull request #1502 from danielmiessler/feat/antropic_tool
Feat/antropic tool
2025-06-11 20:40:00 +02:00
Marco Bacchi
0ae41116aa fix: Add configurable HTTP timeout for Ollama client
Add a new setup question to configure the HTTP timeout duration for
Ollama requests. The default value is set to 20 minutes.
2025-06-11 20:36:57 +02:00
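
The configurable timeout reduces to constructing the HTTP client from the setup answer, with the stated 20-minute default. A sketch with an illustrative constructor name:

```go
import (
	"net/http"
	"time"
)

const defaultOllamaTimeout = 20 * time.Minute

func newOllamaHTTPClient(d time.Duration) *http.Client {
	if d <= 0 {
		d = defaultOllamaTimeout // fall back to the documented default
	}
	return &http.Client{Timeout: d}
}
```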
Eugen Eisler
2b11f3e48e feat: search tool result collection 2025-06-11 20:21:34 +02:00
Eugen Eisler
ed77cc2320 feat: search tool working 2025-06-11 19:56:38 +02:00
Eugen Eisler
29f19fce51 Merge pull request #1499 from noamsiegel/improve-create-prd-pattern
feat: Enhance the PRD Generator's identity and purpose
2025-06-11 18:04:53 +02:00
Eugen Eisler
62ed5d2b9a Merge pull request #1497 from ksylvan/0608-analyze-terraform-plan
feat: add Terraform plan analyzer pattern for infrastructure changes
2025-06-11 18:00:55 +02:00
GitButler
836e4c4fab GitButler Workspace Commit
This is a merge commit of the virtual branches in your workspace.

Due to GitButler managing multiple virtual branches, you cannot switch back and
forth between git branches and virtual branches easily. 

If you switch to another branch, GitButler will need to be reinitialized.
If you commit on this branch, GitButler will throw it away.

Here are the branches that are currently applied:
 - improve-create-prd (refs/gitbutler/improve-create-prd)
For more information about what we're doing here, check out our docs:
https://docs.gitbutler.com/features/virtual-branches/integration-branch
2025-06-09 18:24:48 -07:00
Noam Siegel
946c1af42d feat: Enhance the PRD Generator's identity and purpose
The changes in this commit expand the identity and purpose of the PRD Generator
to provide more clarity on its role and the expected output. The key changes
include:

- Defining the Generator's purpose as transforming product ideas into a
  structured PRD that ensures clarity, alignment, and precision in product
  planning and execution.
- Outlining the key sections typically found in a PRD that the Generator should
  cover, such as Overview, Objectives, Target Audience, Features, User Stories,
  Functional and Non-functional Requirements, Success Metrics, and Timeline.
- Providing more detailed instructions on the expected output format, structure,
  and content, including the use of Markdown, labeled sections, bullet points,
  tables, and highlighting of priorities or MVP features.
2025-06-09 18:24:48 -07:00
Kayvan Sylvan
a74585cb14 feat: add Terraform plan analyzer pattern for infrastructure change assessment
- Create new pattern for analyzing Terraform plans
- Add identity defining expert plan analyzer role
- Include focus on security, cost, and compliance
- Define three output sections for summaries
- Specify 20-word sentence summary requirement
- List 10 critical changes with word limits
- Include 5 key takeaways section format
- Add markdown formatting output instructions
- Require numbered lists over bullet points
- Prohibit warnings and duplicate content
2025-06-08 22:49:02 -07:00
github-actions[bot]
5ffd458aa0 Update version to v1.4.196 and commit 2025-06-07 18:02:01 +00:00
Eugen Eisler
9786721037 Merge pull request #1495 from ksylvan/0606-aiml-provider
Add AIML provider configuration
2025-06-07 20:00:27 +02:00
Kayvan Sylvan
ffb31985e8 feat: add AIML provider to OpenAI compatible providers configuration
## CHANGES

- Add AIML provider configuration
- Set AIML base URL to api.aimlapi.com/v1
- Expand supported OpenAI compatible providers list
- Enable AIML API integration support
2025-06-06 07:13:10 -07:00
Daniel Miessler
eeee37a7cc Updated output. 2025-05-31 12:48:46 -07:00
Daniel Miessler
bd89a8d776 Updated output. 2025-05-31 07:14:36 -07:00
Daniel Miessler
2311e7e7a1 Updated output. 2025-05-31 07:14:10 -07:00
Daniel Miessler
09b79283e9 Updated output. 2025-05-31 07:12:29 -07:00
Daniel Miessler
7fbb5e0935 Updated output. 2025-05-31 07:07:56 -07:00
Daniel Miessler
984d9d03f5 Updated output. 2025-05-31 07:06:29 -07:00
Daniel Miessler
c47502fa8c Updated output. 2025-05-31 06:58:46 -07:00
Daniel Miessler
1fe02bdf22 Added simpler paper analyzer, updated the output. 2025-05-31 06:51:19 -07:00
Daniel Miessler
d550385a5e Added simpler paper analyzer. 2025-05-31 06:48:53 -07:00
github-actions[bot]
1e81da5f42 Update version to v1.4.195 and commit 2025-05-24 09:08:17 +00:00
Eugen Eisler
5b318dc402 Merge pull request #1487 from ksylvan/0524-update-pdfjs
Dependency Updates and PDF Worker Refactoring
2025-05-24 11:06:43 +02:00
Kayvan Sylvan
4027305345 feat: upgrade PDF.js to v4.2 and refactor worker initialization
### CHANGES
- Add `.browserslistrc` to define target browser versions.
- Upgrade `pdfjs-dist` dependency from v2.16 to v4.2.67.
- Upgrade `nanoid` dependency from v4.0.2 to v5.0.9.
- Introduce `pdf-config.ts` for centralized PDF.js worker setup.
- Refactor `PdfConversionService` to use new PDF worker configuration.
- Add static `pdf.worker.min.mjs` to serve PDF.js worker.
- Update Vite configuration for ESNext build target and PDF.js.
2025-05-24 00:29:20 -07:00
github-actions[bot]
63879d5cf7 Update version to v1.4.194 and commit 2025-05-24 06:04:31 +00:00
Eugen Eisler
9539441496 Merge pull request #1485 from ksylvan/0523-generalize-web-ui-connect-to-fabric-api
Web UI: Centralize Environment Configuration and Make Fabric Base URL Configurable
2025-05-24 08:02:57 +02:00
github-actions[bot]
352ade34c8 Update version to v1.4.193 and commit 2025-05-24 05:59:22 +00:00
Eugen Eisler
9abc69c1a9 Merge pull request #1484 from ksylvan/0523-web-ui-cleanup-and-updates
Web UI update all packages, reorganize docs, add install scripts
2025-05-24 07:57:42 +02:00
Kayvan Sylvan
93f6f2f0c4 feat: add centralized environment configuration for Fabric base URL
- Create environment config module for URL handling
- Add getFabricBaseUrl() function with server/client support
- Add getFabricApiUrl() helper for API endpoints
- Configure Vite to inject FABRIC_BASE_URL client-side
- Update proxy targets to use environment variable
- Add TypeScript definitions for window config
- Support FABRIC_BASE_URL env var with fallback
2025-05-23 20:45:57 -07:00
Kayvan Sylvan
1f5d3db3fb fix typo in script name 2025-05-23 17:51:41 -07:00
Kayvan Sylvan
4446b456ba docs: reorganize web documentation and add installation scripts
## CHANGES

- Move legacy documentation files to web/legacy/
- Update web README with installation instructions
- Add convenience scripts for npm and pnpm installation
- Update all package dependencies to latest versions
- Add PDF-to-Markdown installation steps to README
- Remove duplicate documentation files
2025-05-23 17:47:33 -07:00
Eugen Eisler
870941090a Merge pull request #1481 from skibum1869/feature/summarize_board_meeting
Add board meeting summary pattern template
2025-05-23 23:36:46 +02:00
Max Harris
5fc004805e Update meeting summary template with word count requirement
AI:

Add minimum word count for context section in board summary
2025-05-23 10:27:18 -05:00
Max Harris
ce47018fc3 Merge branch 'danielmiessler:main' into main 2025-05-23 09:38:40 -05:00
Max Harris
a09131ea72 Add board meeting summary pattern template 2025-05-23 09:38:24 -05:00
github-actions[bot]
36eb321059 Update version to v1.4.192 and commit 2025-05-23 05:44:31 +00:00
Eugen Eisler
47bf9600d6 Merge pull request #1480 from ksylvan/0522-auto-raw-mode-for-some-models
Automatic setting of "raw mode" for some models
2025-05-23 07:43:04 +02:00
Kayvan Sylvan
be674841e7 feat: add automatic raw mode detection for specific AI models
## CHANGES

- Add model-specific raw mode detection logic
- Check Ollama llama2/llama3 models for raw mode
- Check OpenAI o1/o3/o4 models for raw mode
- Use model from options or default chatter
- Auto-enable raw mode when vendor requires it
- Import strings package for prefix matching
2025-05-22 17:04:11 -07:00
Kayvan Sylvan
39a8b67438 feat: add NeedsRawMode method to AI vendor interface
## CHANGES

- Add NeedsRawMode to Vendor interface
- Implement NeedsRawMode in all AI clients
- Return false for all implementations
- Support model-specific raw mode detection
- Enable future raw mode requirements
2025-05-22 16:41:12 -07:00
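
The two commits above add the interface method and the per-vendor prefix checks. A combined sketch; the Vendor interface is trimmed to the one method relevant here:

```go
import "strings"

type Vendor interface {
	NeedsRawMode(model string) bool
}

type ollamaClient struct{}

func (ollamaClient) NeedsRawMode(model string) bool {
	return strings.HasPrefix(model, "llama2") || strings.HasPrefix(model, "llama3")
}

type openAIClient struct{}

func (openAIClient) NeedsRawMode(model string) bool {
	for _, p := range []string{"o1", "o3", "o4"} {
		if strings.HasPrefix(model, p) {
			return true
		}
	}
	return false
}
```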
github-actions[bot]
0a4950dd08 Update version to v1.4.191 and commit 2025-05-22 19:03:47 +00:00
Eugen Eisler
593c1558c0 Merge pull request #1478 from ksylvan/0522-upgrade-to-claude-4
Claude 4 Integration and README Updates
2025-05-22 21:02:11 +02:00
Kayvan Sylvan
c8f9a39a40 feat: add support for Anthropic Claude 4 models and update SDK to v1.2.0
CHANGES
- Upgrade `anthropic-sdk-go` dependency to version `v1.2.0`.
- Integrate new Anthropic Claude 4 Opus and Sonnet models.
- Remove deprecated Claude 2.0 and 2.1 models from list.
- Adjust model type casting for `anthropic-sdk-go v1.2.0` compatibility.
- Refresh README: announce Claude 4, update date, fix links.
2025-05-22 11:26:04 -07:00
github-actions[bot]
50ec02546f Update version to v1.4.190 and commit 2025-05-20 10:12:21 +00:00
Eugen Eisler
881085d0fe Merge pull request #1475 from ksylvan/0519-fix-dupe-input-attempt-2
refactor: improve raw mode handling in BuildSession
2025-05-20 12:10:47 +02:00
Kayvan Sylvan
2d75052e57 refactor: improve raw mode handling in BuildSession
## CHANGES

- Fix system message handling with patterns in raw mode
- Prevent duplicate inputs when using patterns
- Add conditional logic for pattern vs non-pattern scenarios
- Simplify message construction with clearer variable names
- Improve code comments for better readability
2025-05-19 22:18:12 -07:00
github-actions[bot]
fee604682b Update version to v1.4.189 and commit 2025-05-19 21:39:14 +00:00
Eugen Eisler
941ccabd92 Merge pull request #1473 from roumy/add_authent_ollama
add authentication for ollama instance
2025-05-19 23:37:45 +02:00
github-actions[bot]
57cd563963 Update version to v1.4.188 and commit 2025-05-19 21:36:30 +00:00
Eugen Eisler
274b6eada6 Merge pull request #1474 from ksylvan/0519-fix-doubled-user-input
feat: update `BuildSession` to handle message appending logic
2025-05-19 23:35:03 +02:00
Kayvan Sylvan
bc27f9d685 refactor: improve message handling for raw mode and Anthropic client
## CHANGES

- Clarify raw mode message handling in BuildSession
- Fix pattern-based message handling in non-raw mode
- Refactor Anthropic client message normalization
- Add proper handling for empty message arrays
- Implement user/assistant message alternation for Anthropic
- Preserve system messages in Anthropic conversations
- Add safeguards for message sequence validation
2025-05-19 12:50:41 -07:00
pr
1291b35b63 add authentication for ollama instance 2025-05-19 11:01:47 +02:00
Eugen Eisler
9862564c45 Merge pull request #1467 from joshuafuller/main
Typos, spelling, grammar and other minor updates
2025-05-19 07:52:12 +02:00
Eugen Eisler
bbc183f276 Merge pull request #1468 from NavNab/main
Refactor content structure in create_hormozi_offer system.md for clarity and readability
2025-05-18 20:26:28 +02:00
NavNab
9c4445d7bd Refactor content structure in system.md for clarity and readability
- Improved formatting of the introduction and content summary sections for better flow.
- Consolidated repetitive sentences and enhanced the overall coherence of the text.
- Adjusted bullet points and numbering for consistency and easier comprehension.
- Ensured that key concepts are clearly articulated and visually distinct to aid understanding.
2025-05-18 17:03:24 +02:00
Joshua Fuller
920620d771 Merge pull request #1 from joshuafuller/branch/fix-spelling-in-pattern-management-guide 2025-05-16 23:48:52 -05:00
Joshua Fuller
d734e25e0d Merge pull request #2 from joshuafuller/branch/fix-spelling-in-pr-1284-update-notes 2025-05-16 23:48:42 -05:00
Joshua Fuller
a31b2d5e41 Merge pull request #3 from joshuafuller/branch/fix-typos-in-web-readme 2025-05-16 23:48:31 -05:00
Joshua Fuller
8e7e4aa169 Merge pull request #4 from joshuafuller/branch/fix-spelling-of-anthropic-in-notes-md 2025-05-16 23:48:23 -05:00
Joshua Fuller
ea57a64afa Merge pull request #5 from joshuafuller/branch/fix-grammar-in-nuclei-template-instructions 2025-05-16 23:48:13 -05:00
Joshua Fuller
da1a9dab56 docs: fix grammar in nuclei template instructions 2025-05-16 23:45:33 -05:00
Joshua Fuller
068f111986 docs: correct Anthropic spelling in notes 2025-05-16 23:44:31 -05:00
Joshua Fuller
dd0be51726 docs: fix typos in web README 2025-05-16 23:44:19 -05:00
Joshua Fuller
43a1e66cc8 docs: fix spelling in PR 1284 update notes 2025-05-16 23:44:06 -05:00
Joshua Fuller
430a272e1d docs: fix spelling in pattern management guide 2025-05-16 23:43:38 -05:00
github-actions[bot]
0e892f38e4 Update version to v1.4.187 and commit 2025-05-10 07:42:11 +00:00
Eugen Eisler
aa0fe90258 Merge pull request #1463 from CodeCorrupt/nixpkgs_completion
Add completion to the build output for Nix
2025-05-10 09:40:45 +02:00
CodeCorrupt
c59c7553b3 Add completion files to the build output for Nix 2025-05-07 17:06:00 -04:00
github-actions[bot]
703756d0b0 Update version to v1.4.186 and commit 2025-05-06 22:06:46 +00:00
Eugen Eisler
50d22f8e77 Merge pull request #1459 from ksylvan/0505-cleanup-some-old-detritus
chore: Repository cleanup and .gitignore Update
2025-05-07 00:05:19 +02:00
Kayvan Sylvan
fde2efd4ce chore: update .gitignore and remove obsolete files
- Add `coverage.out` to `.gitignore` for ignoring coverage output.
- Remove `Alma.md` documentation file from the repository.
- Delete `rate_ai_result.txt` stitch script from `stitches` folder.
- Remove `readme.md` for `rate_ai_result` stitch documentation.
2025-05-05 17:16:38 -07:00
github-actions[bot]
0150c3a37d Update version to v1.4.185 and commit 2025-04-28 19:27:01 +00:00
Eugen Eisler
2a0216b9aa Merge pull request #1453 from ksylvan/0428-default-model-setting-fix
Fix for default model setting
2025-04-28 21:25:35 +02:00
Kayvan Sylvan
a6d14d86b8 refactor: introduce getSortedGroupsItems for consistent sorting logic
### CHANGES
- Add `getSortedGroupsItems` to centralize sorting logic.
- Sort groups and items alphabetically, case-insensitive.
- Replace inline sorting in `Print` with new method.
- Update `GetGroupAndItemByItemNumber` to use sorted data.
- Ensure original `GroupsItems` remains unmodified.
2025-04-28 11:41:32 -07:00
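A rough sketch of the sorting approach this commit describes, assuming illustrative type and field names; the key point is sorting copies so the original `GroupsItems` data is never mutated:

```go
package main

import (
	"fmt"
	"sort"
	"strings"
)

type GroupItems struct {
	Name  string
	Items []string
}

// getSortedGroupsItems sorts groups and their items alphabetically and
// case-insensitively, operating on copies so the caller's slice stays
// unmodified.
func getSortedGroupsItems(groups []GroupItems) []GroupItems {
	sorted := make([]GroupItems, len(groups))
	copy(sorted, groups)
	sort.SliceStable(sorted, func(i, j int) bool {
		return strings.ToLower(sorted[i].Name) < strings.ToLower(sorted[j].Name)
	})
	for g := range sorted {
		items := append([]string(nil), sorted[g].Items...) // copy before sorting
		sort.SliceStable(items, func(i, j int) bool {
			return strings.ToLower(items[i]) < strings.ToLower(items[j])
		})
		sorted[g].Items = items
	}
	return sorted
}

func main() {
	groups := []GroupItems{{Name: "openai", Items: []string{"o3", "GPT-4o"}}}
	fmt.Println(getSortedGroupsItems(groups))
}
```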
github-actions[bot]
a9374c128b Update version to v1.4.184 and commit 2025-04-25 08:27:55 +00:00
Eugen Eisler
f32b9f81da Merge pull request #1447 from ksylvan/0424-more-shell-completions
More shell completion scripts: Zsh, Bash, and Fish
2025-04-25 10:26:20 +02:00
Kayvan Sylvan
bf3af8e98e feat: add shell completion scripts for Zsh, Bash, and Fish
CHANGES:
- Add shell completion support for three major shells
- Create standardized completion scripts in completions/ directory
- Add --shell-complete-list flag for machine-readable output
- Update Print() methods to support plain output format
- Document installation steps for each shell in README
- Replace old fish completion script with improved version
2025-04-24 17:47:39 -07:00
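The `--shell-complete-list` behavior could be sketched as below; the types and function shape are assumptions, and only the flag semantics come from the commit message:

```go
package main

import "fmt"

type group struct {
	Name  string
	Items []string
}

// printPatterns sketches the plain-output mode: with shellCompleteList
// set, headers and formatting are suppressed so completion scripts get
// one raw name per line.
func printPatterns(groups []group, shellCompleteList bool) {
	for _, g := range groups {
		if !shellCompleteList {
			fmt.Printf("\n%s:\n", g.Name) // human-readable group header
		}
		for _, item := range g.Items {
			fmt.Println(item) // raw name, parseable in both modes
		}
	}
}

func main() {
	printPatterns([]group{{Name: "patterns", Items: []string{"summarize", "extract_wisdom"}}}, true)
}
```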
github-actions[bot]
095c295ee5 Update version to v1.4.183 and commit 2025-04-23 20:03:10 +00:00
Eugen Eisler
93ecc9cfea Merge pull request #1431 from KenMacD/fish-completion
Add a completion script for fish
2025-04-23 22:01:41 +02:00
github-actions[bot]
e7aaa23fc2 Update version to v1.4.182 and commit 2025-04-23 20:00:59 +00:00
Eugen Eisler
197d3454f8 Merge pull request #1441 from ksylvan/0423-nix-go-build-toolchain-update
Update go toolchain and go module packages to latest versions
2025-04-23 21:59:25 +02:00
Kayvan Sylvan
50a4f8b491 chore: fix "nix flake check" errors 2025-04-23 11:07:01 -07:00
Kayvan Sylvan
894b4967dd refactor: centralize Go version definition in flake.nix
CHANGES
*   Define `getGoVersion` function in `flake.nix`.
*   Use `getGoVersion` to set Go version consistently.
*   Pass `goVersion` explicitly into `nix/shell.nix`.
*   Remove redundant Go version definition from `shell.nix`.
2025-04-23 09:34:39 -07:00
Kayvan Sylvan
9837bd6664 chore: update Go to 1.24.2 and refresh dependencies
Update Go version across Dockerfile, Nix configurations, and Go modules.
Refresh dependencies and Nix flake inputs.

CHANGES:
*   Update Go version to 1.24.2 in Dockerfile.
*   Set Go version to 1.24.0 and toolchain to 1.24.2.
*   Refresh Go module dependencies and sums (go.mod, go.sum).
*   Update Nix flake lock file inputs.
*   Configure Nix environment and packages for Go 1.24.
*   Update gomod2nix lock file with dependency hashes.
*   Use Go 1.24 in Nix development shell environment.
2025-04-23 09:18:01 -07:00
github-actions[bot]
6ca1b5dac4 Update version to v1.4.181 and commit 2025-04-22 16:03:07 +00:00
Eugen Eisler
c85135c04e Merge pull request #1433 from ksylvan/0421-anthropic-api-update
chore: update Anthropic SDK to v0.2.0-beta.3 and migrate to V2 API
2025-04-22 18:01:53 +02:00
github-actions[bot]
31e4e42a94 Update version to v1.4.180 and commit 2025-04-22 11:40:37 +00:00
Eugen Eisler
196db04fc2 Merge pull request #1435 from ksylvan/0421-fix-raw-input-with-stratetgies
chore: Fix user input handling when using raw mode and `--strategy` flag
2025-04-22 13:39:22 +02:00
Kayvan Sylvan
b3b1b5a471 chore: unify raw mode message handling and preserve env vars in extension executor
## CHANGES

- refactor BuildSession raw mode to prepend system to user content
- ensure raw mode messages always have User role
- keep existing user message when no systemMessage provided
- append systemMessage separately in non-raw mode sessions
- store original cmd.Env before context-based exec command creation
- recreate exec command with context then restore originalEnv
- add comments clarifying raw vs non-raw handling behavior
2025-04-21 17:04:11 -07:00
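The env-preservation fix might be sketched like this; the helper name is hypothetical, and only the store-then-restore sequence comes from the commit message:

```go
package main

import (
	"context"
	"os/exec"
)

// recreateWithContext captures cmd.Env before rebuilding the command
// with a context, then restores it so extension env vars survive.
func recreateWithContext(ctx context.Context, cmd *exec.Cmd) *exec.Cmd {
	originalEnv := cmd.Env // store before the context-based re-creation
	newCmd := exec.CommandContext(ctx, cmd.Path, cmd.Args[1:]...)
	newCmd.Env = originalEnv // restore the original environment
	return newCmd
}

func main() {
	cmd := exec.Command("echo", "hello")
	cmd.Env = []string{"FOO=bar"}
	_ = recreateWithContext(context.Background(), cmd)
}
```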
Kayvan Sylvan
892439a177 chore: update Anthropic SDK to v0.2.0-beta.3 and migrate to V2 API
## CHANGES

- Upgrade Anthropic SDK from alpha.11 to beta.3
- Update API endpoint from v1 to v2
- Replace anthropic.F() with direct assignment
- Replace anthropic.F() with anthropic.Opt() for optional params
- Simplify event delta handling in streaming
- Change client type from pointer to value type
- Update comment with SDK changelog reference
2025-04-21 13:17:03 -07:00
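The shape of this migration can be sketched with stand-in types (the real anthropic-sdk-go types differ in detail); only the move from `anthropic.F()` wrappers to direct assignment plus an `Opt`-style helper for optional params comes from the commit message:

```go
package main

import "fmt"

// Opt is a stand-in for the optional-parameter wrapper; the real SDK
// type differs in detail.
type Opt[T any] struct {
	Value T
	Valid bool
}

func opt[T any](v T) Opt[T] { return Opt[T]{Value: v, Valid: true} }

// messageParams mimics the before/after shape of the migration:
// required fields are assigned directly, optional fields use a helper
// instead of wrapping everything in anthropic.F().
type messageParams struct {
	MaxTokens   int64        // beta SDK: direct assignment
	Temperature Opt[float64] // beta SDK: optional via helper
}

func main() {
	// alpha SDK style (removed): MaxTokens: anthropic.F(int64(1024))
	p := messageParams{MaxTokens: 1024, Temperature: opt(0.7)}
	fmt.Printf("%+v\n", p)
}
```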
Kenny MacDermid
486ff42b59 Add a completion script for fish 2025-04-21 12:58:05 -03:00
140 changed files with 14760 additions and 3879 deletions

View File

@@ -7,29 +7,74 @@ body:
attributes:
value: |
Thanks for taking the time to fill out this bug report!
Please provide as much detail as possible to help us understand and reproduce the issue.
- type: textarea
id: what-happened
attributes:
label: What happened?
description: Also tell us, what did you expect to happen?
placeholder: Tell us what you see!
value: "I was doing THIS, when THAT happened. I was expecting THAT_OTHER_THING to happen instead."
value: "Please provide all the steps to reproduce the bug. I was doing THIS, when THAT happened. I was expecting THAT_OTHER_THING to happen instead."
validations:
required: true
- type: checkboxes
- type: dropdown
id: os
attributes:
label: Operating System
options:
- macOS - Silicon (arm64)
- macOS - Intel (amd64)
- Linux - amd64
- Linux - arm64
- Windows
validations:
required: true
- type: textarea
id: os-version
attributes:
label: OS Version
description: Please provide details about your OS version by running one of the following commands.
placeholder: |
macOS: `sw_vers`
Linux: `uname -a` or `cat /etc/os-release`
Windows: `ver`
render: shell
- type: dropdown
id: installation
attributes:
label: How did you install Fabric?
description: "Please select the method you used to install Fabric. You can find this information in the [Installation section of the README](https://github.com/ksylvan/fabric/blob/main/README.md#installation)."
options:
- Release Binary - Windows
- Release Binary - macOS (arm64)
- Release Binary - macOS (amd64)
- Release Binary - Linux (amd64)
- Release Binary - Linux (arm64)
- Package Manager - Homebrew (macOS)
- Package Manager - AUR (Arch Linux)
- From Source
- Other
validations:
required: true
- type: textarea
id: version
attributes:
label: Version check
description: Please make sure you were using the latest version of this project available in the `main` branch.
options:
- label: Yes I was.
required: true
label: Version
description: Please copy and paste the output of `fabric --version` (or `fabric-ai --version` if you installed it via brew) here.
render: text
- type: textarea
id: logs
attributes:
label: Relevant log output
description: Please copy and paste any relevant log output. This will be automatically formatted into code, so no need for backticks.
render: shell
- type: textarea
id: screens
attributes:

View File

@@ -2,7 +2,7 @@ name: Go Release
on:
repository_dispatch:
types: [ tag_created ]
types: [tag_created]
push:
tags:
- "v*"
@@ -108,10 +108,15 @@ jobs:
Add-Content -Path $env:GITHUB_ENV -Value "latest_tag=$latest_tag"
- name: Create release if it doesn't exist
shell: bash
env:
GH_TOKEN: ${{ secrets.GITHUB_TOKEN }}
run: |
gh release view ${{ env.latest_tag }} || gh release create ${{ env.latest_tag }} --title "Release ${{ env.latest_tag }}" --notes "Automated release for ${{ env.latest_tag }}"
if ! gh release view ${{ env.latest_tag }} >/dev/null 2>&1; then
gh release create ${{ env.latest_tag }} --title "Release ${{ env.latest_tag }}" --notes "Automated release for ${{ env.latest_tag }}"
else
echo "Release ${{ env.latest_tag }} already exists."
fi
- name: Upload release artifact
if: matrix.os == 'windows-latest'

View File

@@ -11,6 +11,10 @@ on:
permissions:
contents: write # Ensure the workflow has write permissions
concurrency:
group: version-update
cancel-in-progress: false
jobs:
update-version:
if: github.event_name == 'push' && github.ref == 'refs/heads/main'
@@ -30,6 +34,11 @@ jobs:
git config user.name "github-actions[bot]"
git config user.email "github-actions[bot]@users.noreply.github.com"
- name: Pull latest main and tags
run: |
git pull --rebase origin main
git fetch --tags
- name: Get the latest tag
id: get_latest_tag
run: |

1
.gitignore vendored
View File

@@ -58,6 +58,7 @@ coverage.xml
.hypothesis/
.pytest_cache/
cover/
coverage.out
# Translations
*.mo

318
Alma.md
View File

@@ -1,318 +0,0 @@
# SPQA Policy and State for Alma Security
## Document Purpose
This document captures the SPQA policy and State for Alma Security, a security startup out of Redwood City, CA.
This is part of the SPQA context that will be used to answer questions and create artifacts for the company, e.g., company strategy, security strategy, quarterly security reports (QSRs), project plans, recommendations on which projects to undertake, which investments to take and avoid, and other such decisions.
A major aspect of the SPQA system is the definition of the company's mission, goals, KPIs, and challenges. These shape everything within the company and thus should be used to shape the recommendations made when asked.
In addition to the clearly stated goals and other defining characteristics listed above, there will also be a streaming list of updates coming into this system using the Activity document.
Those will be changes, updates, or modifications to the direction of the company. For example, if Goal number 4 is to build a new datacenter in Boise, Idaho, but we see an update in the Activity section that says we've lost the ability to build in Boise, we should consider goal #4 out of the picture for prioritization and other decision purposes. In other words, the streaming activity log into this document should be considered updates to the core content.
## Company History
Alma Security was started by Chris Meyers, who was previously at Sigma Systems as CTO and HPE as a senior security engineer.
He started the company because, "I saw a gap in the authentication market, where companies were only looking at one or two aspects of one's identity to do authentication. They weren't looking at the whole picture and turning that into a continuous authentication story."
## Company Mission
The mission of Alma Security is to ensure businesses can continuously authenticate their users using their whole selves.
## Company Goals (G1 means goal 1, G2 is goal 2, etc. Treat each item (goal/kpi/etc) as half as important as the one before it.)
NOTE: Some goals are things like project rollout which serve the higher goals. In that case they shouldn't always be considered so much lower priority because one is serving the other.
- G1: Achieve 20% market share by January 2025
- G2: Hit 10000 active customers by January 2025
- G3: Hit a customer trust score of 90+% by January 2025
- G4: Get churn below 5% by August 2024
- G5: Launch in Europe by August 2024
- G6: Launch in India by November 2024
- G7: Launch Mood-monitor integration by February 2024
- G8: Launch partnership with Apple Passkeys by June 2024
## Company KPIs
- K1: Current market share percentage
- K2: Number of active customers
- K3: Current churn percentage
- K4: Launched_in_Europe (yes/no)
- K5: Launched_in_India (yes/no)
-----------------------------------------------------------------------------------------------------------------------
## Security Team Mission
- SM1: Protect Alma Security's customers and intellectual property from security and privacy incidents.
## Security Team Goals
- SG1: Secure all customer data -- especially biometric -- from security and privacy incidents.
- SG2: Protect Alma Security's intellectual property from being captured by unauthorized parties.
- SG3: Reach a time to detect malicious behavior of less than 4 minutes by January 2025
- SG4: Ensure the public trusts our product, because it's an authentication product we can't survive if people don't trust us.
- SG5: Reach a time to remediate critical vulnerabilities on crown jewel systems of less than 16 hours by August 2025
- SG6: Reach a time to remediate critical vulnerabilities on all systems of less than 3 days by August 2025
- SG7: Complete audit of Apple Passkey integration by February 2025
- SG8: Complete remediation of Apple Passkey vulnerabilities by February 2025
## Security Team KPIs (How we measure the team)
- SK1: TTD: Time to detect malicious behavior (Minutes)
- SK2: TTI: Time to begin investigation of malicious behavior (Minutes)
- SK3: TTR-CJC: Time to remediate critical vulnerabilities on crown jewel systems (Hours)
- SK4: TTR-C: Time to remediate critical vulnerabilities on all systems (Hours)
- SK5: PT: Public trust score (Complete, Significant, Moderate, Minimal, Distrust, N/A)
## Risk Register (The things we're most worried about)
- R1: Our infrastructure security team is understaffed by 50% after 5 key people left
- R2: We are not currently monitoring our external perimeter for attack surface related vulnerabilities like open ports, listening applications, unknown hosts, unknown subdomains pointing to these things, etc. We only do scans once every couple of months and we don't really have anyone to look at the results
- R3: It takes us multiple days to investigate potential malicious behavior on our systems.
- R4: We lack a full list of our assets, including externally facing hosts, S3 buckets, etc., which make up our attack surface
- R5: We have a low public trust score due to the events of 2022.
## Security Team Narrative
### Background
Alma hired a new security team starting in January of 2023 and we have been building out the program since then. The philosophy and approach for the security team is to explicitly articulate what we believe the highest risks are to Alma, to deploy targeted strategies to address those risks, and to use clear, transparent KPIs to show progress towards our goals over time.
### Current Risks
So our risk register looks like this:
1. We are understaffed by 50% after 5 key people left in 2022
2. Our perimeter is not being monitored for attack surface related vulnerabilities
3. It takes us too long to detect and start investigating malicious behavior on our systems
4. We do not have a full list of our assets, which makes it difficult to know what we need to protect
5. We have a low public trust score due to the events of 2022
### Strategies
As such, our strategies are as follows:
1. Hire 5 more A-tier security professionals
2. Purchase and implement an attack surface management solution
3. Invest in our detection and response capabilities
4. Purchase an asset inventory system that integrates with our attack surface management tool
5. Leverage PR to share as much of our progress as possible with the public to rebuild trust
### How We're Doing
We believe being transparent about our progress is key to everything, and for that reason we maintain a limited number of KPIs that we update every quarter. These metrics will not change often. They will remain consistent so that it's easy to track how we're spending our resources and the progress we're making.
Those KPIs are:
1. Time to detect malicious behavior
2. Time to start investigating malicious behavior
3. Time to remediate critical vulnerabilities on crown jewel systems
4. Time to remediate critical vulnerabilities on all systems
5. Our public trust score
As of $DATE$, our KPIs for these are currently:
$GIVE CURRENT KPIs from the Activity section below$
$INSERT GRAPHS OF KPI PROGRESS OVER TIME HERE$
## Security Team Strategies
- STS1: Hire 5 more A-tier security professionals
- STS2: Purchase an attack surface management solution
- STS3: Invest in our detection and response capabilities
- STS4: Purchase an asset inventory system that integrates with our attack surface management tool
## Infrastructure Notes (a basic description of our tech stack and various context around it)
- We currently have no WAF protecting our main web app but we're considering adding one
- We have had some issues with S3 buckets becoming public, or being set up as public, which has led to some close calls with customer data almost being exposed.
- alma.amazon-domain.com is our primary S3 bucket that contains everything, but it's not publicly readable or listable
- We have a root account for our AWS account that doesn't yet have 2FA on it, but we're working on fixing that within a few weeks (but it's been open for a few months)
- We also use Postgres for all our databases.
- Developers have root access to all the Kubernetes nodes via SSH on port 45001 using a shared developer key issued during laptop provisioning.
- We're a Kubernetes shop and do everything through AWS
- We're logging most stuff to CloudTrail and we kind of use Amazon GuardDuty, but we don't have a 24/7 team to monitor alerts and logs. We should add that to our list of challenges next time we update our overall policy
- We also have a Windows infrastructure because some key personnel came from Microsoft. The DC is hosted in our head office which is in Redwood City, and anyone who works in that office (most of the 300 employees) uses that to log in when they start work. The domain is ALMA.
- There's a domain-joined fileserver running Windows 2012 that most people use to upload new ideas and plans for new products. It uses Windows authentication from the domain.
- We use a Palo Alto Networks firewall with 2fa using windows authenticator tied to SSO.
- The name of the AI system doing all this context creation using SPQA is Alma, which is also the name of the company.
- We use Workday for HR stuff. Slack for realtime communications. Outlook 365 as a service. Sentinel One on the workstations and laptops. Servers in AWS are mostly Amazon Linux 2 with a few Ubuntu boxes that are a few years old.
- We also primarily use Postgres for all of our systems.
## Team
TEAM MEMBER | TEAM ASSIGNED | SKILLS | PAY LEVEL | LOCATION | PROJECTS
Nadia Khan | Detection and Response | D&R (Expert), AWS (Strong), Python (Expert), Kubernetes (Basic), Postgres (Basic) | $249K | Redwood City
Chris Magann | Vulnerability Management | VM (Expert), AWS (Strong), Python (Basic), Postgres (Basic) | $212K | Redwood City
Tigan Wang | Vulnerability Management | VM (Expert), AWS (Strong), Python (Basic), Postgres (Basic) | $217K | Redwood City
## Projects
PROJECT NAME | PROJECT DESCRIPTION | PROJECT PRIORITY | PROJECT MEMBERS | START DATE | END DATE | STATUS | PROJECT COST
WAF Install | Install a WAF in front of our main web app | Critical | Nadia Khan | 2024-01-01 | Ongoing | In Progress | $112K one-time, $9K/month
Multi-Factor Authentication (MFA) Rollout | Implement MFA across all internal and external systems | Critical | Chris Magann | 2024-01-15 | 2024-05-01 | Planned | $80K one-time, $5K/month
Procure and Implement ASM | Implement continuous monitoring for attack surface vulnerabilities | High | Tigan Wang | 2024-02-15 | 2024-06-15 | Not Started | $75K one-time, $6K/month
Data Encryption Upgrade | Upgrade encryption protocols for all sensitive data | Medium | Nadia Khan | 2024-04-01 | 2024-08-01 | Planned | $95K one-time
Incident Response Enhancement | Develop and implement a 24/7 incident response team | High | Nadia Khan | 2024-03-01 | 2024-07-01 | In Progress | $150K one-time, $10K/month
Cloud Security Optimization | Optimize AWS cloud security configurations and practices | Medium | Tigan Wang | 2024-02-01 | 2024-06-01 | In Progress | $100K one-time, $8K/month
S3 Bucket Security | Review and secure all S3 buckets to prevent data breaches | High | Chris Magann | 2024-01-10 | 2024-04-10 | In Progress | $70K one-time, $5K/month
SQL Injection Mitigation | Implement measures to eliminate SQL injection vulnerabilities | High | Tigan Wang | 2024-01-20 | 2024-05-20 | Not Started | $60K one-time
## SECURITY POSTURE (To be referenced for compliance questions and security questionnaires)
July 2019
Admin accounts still not required to use 2FA.
Company laptops distributed to employees, no MDM yet for device management.
AWS IAM roles created for engineers, but root access still frequently used.
Started basic vulnerability scanning using open-source tools.
December 2019
MFA enforced for all Google Workspace accounts after a phishing attempt.
Introduced ClamAV for basic endpoint protection on corporate laptops.
AWS GuardDuty enabled for threat detection, but no formal incident response team.
First incident response plan table-top exercise conducted, but findings not fully documented.
April 2020
Migrated from Google Workspace to Office 365, with MFA enabled for all users.
Rolled out SentinelOne for endpoint protection on 50% of company laptops.
Implemented least-privilege access control for AWS IAM roles.
First formal vendor risk management review completed for major SaaS providers.
August 2020
Completed full deployment of SentinelOne across all endpoints.
Implemented AWS CloudWatch for real-time alerts; however, logs still not monitored 24/7.
Began encrypting all AWS S3 buckets at rest using server-side encryption.
First internal review of data retention policies, started drafting data disposal policy.
January 2021
Rolled out Jamf MDM for centralized management of macOS devices, enforcing encryption (FileVault) on all laptops.
Strengthened Office 365 security by implementing phishing-resistant MFA using authenticator apps.
AWS KMS introduced for managing encryption keys; manual key rotation policy documented.
Introduced formal onboarding and offboarding processes for employee account management.
July 2021
Conditional access policies introduced for Office 365, restricting access based on geography (US-only).
Conducted company-wide security awareness training for the first time, focusing on phishing threats.
Completed first backup and disaster recovery (DR) drill with AWS, documenting recovery times.
AWS Config deployed to monitor and enforce encryption and access control policies across accounts.
December 2021
Full migration to AWS for all production systems completed.
Incident response playbook finalized and shared with the security team; still no 24/7 monitoring.
Documented data classification policies for handling sensitive customer data in preparation for SOC 2 audit.
First third-party penetration test conducted, critical vulnerabilities identified and remediated within 30 days.
March 2022
Rolled out company-wide 2FA for all critical systems, including Office 365, AWS, GitHub, and Slack.
Introduced AWS Secrets Manager for managing sensitive credentials, eliminating hardcoded API keys.
Updated all documentation for identity and access management in preparation for SOC 2 Type 1 audit.
First external vulnerability scan completed using Qualys, with remediation SLAs established.
April 2022
Updated and consolidated all security policies (incident response, access control, data retention) in preparation for SOC 2 audit.
Conducted tabletop exercise for ransomware response, documenting gaps in the incident response process.
Implemented Just-In-Time (JIT) access for administrative privileges in AWS, reducing unnecessary persistent access.
October 2022
Passed SOC 2 Type 1 audit, with recommendations to improve monitoring and asset management.
Launched quarterly phishing simulations to raise employee awareness and track training effectiveness.
Fully enforced encryption for all customer data in transit and at rest using AWS KMS.
Extended GuardDuty to cover all AWS regions; started monitoring alerts daily.
January 2023
Hired a dedicated CISO and expanded security team by 30%.
Integrated continuous vulnerability scanning across all externally facing assets using Qualys.
Conducted first third-party vendor risk assessment to ensure alignment with SOC 2 and internal security standards.
Implemented automated patch management for all AWS EC2 instances, reducing time to deploy critical patches.
July 2023
Rolled out continuous attack surface monitoring (ASM) to identify and remediate external vulnerabilities.
Performed annual data retention review, ensuring compliance with SOC 2 and GDPR requirements.
Conducted a disaster recovery drill for AWS workloads, achieving a recovery time objective (RTO) of under 4 hours.
Completed SOC 2 Type 2 readiness assessment, with focus on improving incident response times.
November 2023
Updated incident response documentation and assigned 24/7 monitoring to a third-party SOC provider.
Rolled out zero-trust network architecture across the organization, removing reliance on VPN for remote access.
Passed SOC 2 Type 2 audit with no major findings; recommendations included improved asset inventory tracking.
Conducted full audit of access control policies and JIT access implementation in preparation for ISO 27001 certification.
April 2024
Implemented AI-driven threat detection to reduce time to detect security incidents from 10 hours to under 2 hours.
Completed full encryption audit across all databases, ensuring compliance with GDPR, HIPAA, and other privacy regulations.
Updated employee training programs to include privacy regulations (GDPR, CCPA) and data handling best practices.
Completed internal review and audit of vendor access to critical systems as part of SOC 2 compliance effort.
Completed move of all AWS services to us-west-2 and us-east-1 regions for 100% us-based cloud services.
October 2024
Conducted organization-wide review of data retention and disposal policies, implementing automated data deletion for expired data.
Implemented continuous compliance monitoring for SOC 2, with automated alerts for deviations in access controls and encryption settings.
Finalized implementation of AI-based monitoring and response systems, significantly reducing time to remediate critical vulnerabilities.
Passed SOC 2 Type 2 and ISO 27001 audits with zero non-conformities, achieving full compliance across all control areas.
March 2018
Personal Gmail accounts used for internal and external communication.
No 2FA enabled on any accounts.
AWS accounts shared with engineers, no IAM roles or formal access control policies.
No centralized endpoint protection; employees use personal laptops with no security controls.
No documented security policies or incident response plan.
September 2018
Initiated migration from personal Gmail to Google Workspace (G Suite) for business email.
Password complexity requirements introduced (minimum 8 characters).
AWS root credentials still shared among team members, no MFA enabled.
No formal logging or monitoring in place for AWS activity.
February 2019
Completed migration to Google Workspace; no email encryption yet.
Introduced a basic password manager (LastPass) but no enforcement policy.
AWS CloudTrail enabled for logging, but no one is reviewing logs.
First draft of the incident response plan created, but not tested.
June 2019
Enforced MFA for Google Workspace admin accounts; standard user
## CURRENT STATE (KPIs, Metrics, Project Activity Updates, etc.)
- October 2022: Current time to detect malicious behavior is 81 hours
- October 2022: Current time to start investigating malicious behavior is 82 hours
- October 2022: Current time to remediate critical vulnerabilities on crown jewel systems is 21 days
- October 2022: Current time to remediate critical vulnerabilities on all systems is 51 days
- January 2023: Current time to detect malicious behavior is 62 hours
- January 2023: Current time to start investigating malicious behavior is 72 hours
- January 2023: Current time to remediate critical vulnerabilities on crown jewel systems is 17 days
- January 2023: Current time to remediate critical vulnerabilities on all systems is 43 days
- July 2023: Current time to detect malicious behavior is 29 hours
- July 2023: Current time to start investigating malicious behavior is 41 hours
- July 2023: Current time to remediate critical vulnerabilities on crown jewel systems is 12 days
- July 2023: Current time to remediate critical vulnerabilities on all systems is 29 days
- November 2023: Current time to detect malicious behavior is 12 hours
- November 2023: Current time to start investigating malicious behavior is 16 hours
- November 2023: Current time to remediate critical vulnerabilities on crown jewel systems is 9 days
- November 2023: Current time to remediate critical vulnerabilities on all systems is 17 days
- February 2024: Started attack surface management vendor selection process
- January 2024: Current time to detect malicious behavior is 9 hours
- January 2024: Current time to start investigating malicious behavior is 14 hours
- January 2024: Current time to remediate critical vulnerabilities on crown jewel systems is 8 days
- January 2024: Current time to remediate critical vulnerabilities on all systems is 12 days
- March 2024: We're now remediating critical vulnerabilities on crown jewels in less than 6 days
- April 2024: We're now remediating all critical vulnerabilities within 11 days
- July 2024: critical vulnerabilities are now being fixed in 9 days
- On August 5 we got remediation of critical vulnerabilities down to 7 days

View File

@@ -1,5 +1,5 @@
# Use official golang image as builder
FROM golang:1.23.4-alpine AS builder
FROM golang:1.24.2-alpine AS builder
# Set working directory
WORKDIR /app

View File

@@ -2,7 +2,7 @@
- The goal is to bring more encapsulation of model management and simplified configuration management, for increased flexibility, transparency on the overall flow, and simplicity in adding new models.
- We need to differentiate:
- Vendors: the producer of models (like OpenAI, Azure, Anthropric, Ollama, ..etc) and their associated APIs
- Vendors: the producer of models (like OpenAI, Azure, Anthropic, Ollama, ..etc) and their associated APIs
- Models: the LLM models these vendors are making public
- Each vendor and the operations allowed by the vendor need to be encapsulated. This includes:
- The questions needed to set up the model (like the API key, or the URL)

View File

@@ -5,6 +5,7 @@ This document explains the complete workflow for managing pattern descriptions a
## System Overview
The pattern system follows this hierarchy:
1. `~/.config/fabric/patterns/` directory: The source of truth for available patterns
2. `pattern_extracts.json`: Contains first 500 words of each pattern for reference
3. `pattern_descriptions.json`: Stores pattern metadata (descriptions and tags)
@@ -13,17 +14,21 @@ The pattern system follows this hierarchy:
## Pattern Processing Workflow
### 1. Adding New Patterns
- Add patterns to `~/.config/fabric/patterns/`
- Run extract_patterns.py to process new additions:
```bash
python extract_patterns.py
The Python Script automatically:
- Creates pattern extracts for reference
- Adds placeholder entries in descriptions file
- Syncs to web interface
### 2. Pattern Extract Creation
The script extracts first 500 words from each pattern's system.md file to:
- Provide context for writing descriptions
@@ -31,8 +36,8 @@ The script extracts first 500 words from each pattern's system.md file to:
- Aid in pattern categorization
### 3. Description and Tag Management
Pattern descriptions and tags are managed in pattern_descriptions.json:
{
"patterns": [
@@ -44,20 +49,21 @@ Pattern descriptions and tags are managed in pattern_descriptions.json:
]
}
## Completing Pattern Metadata
### Writing Descriptions
1. Check pattern_descriptions.json for "[Description pending]" entries
2. Reference pattern_extracts.json for context
3. How to update Pattern short descriptions (one sentence).
You can update your descriptions in pattern_descriptions.json manually or using LLM assistance (prefered approach).
You can update your descriptions in pattern_descriptions.json manually or using LLM assistance (preferred approach).
Tell AI to look for "Description pending" entries in this file and write a short description based on the extract info in the pattern_extracts.json file. You can also ask your LLM to add tags for those newly added patterns, using other patterns' tag assignments as examples.
### Managing Tags
1. Add appropriate tags to new patterns
2. Update existing tags as needed
3. Tags are stored as arrays: ["TAG1", "TAG2"]
@@ -67,6 +73,7 @@ Tell AI to look for "Description pending" entries in this file and write a short
## File Synchronization
The script maintains synchronization between:
- Local pattern_descriptions.json
- Web interface copy in static/data/
- No manual file copying needed
@@ -91,6 +98,7 @@ The script maintains synchronization between:
## Troubleshooting
If patterns are not showing in the web interface:
1. Verify pattern_descriptions.json format
2. Check web static copy exists
3. Ensure proper file permissions
@@ -108,17 +116,3 @@ fabric/
└── static/
└── data/
└── pattern_descriptions.json # Web interface copy

106
Pattern_Descriptions/extract_patterns.py Normal file → Executable file
View File

@@ -1,81 +1,96 @@
#!/usr/bin/env python3
"""Extracts pattern information from the ~/.config/fabric/patterns directory,
creates JSON files for pattern extracts and descriptions, and updates web static files.
"""
import os
import json
import shutil
def load_existing_file(filepath):
"""Load existing JSON file or return default structure"""
if os.path.exists(filepath):
with open(filepath, 'r', encoding='utf-8') as f:
return json.load(f)
try:
with open(filepath, "r", encoding="utf-8") as f:
return json.load(f)
except json.JSONDecodeError:
print(
f"Warning: Malformed JSON in {filepath}. Starting with an empty list."
)
return {"patterns": []}
return {"patterns": []}
def get_pattern_extract(pattern_path):
"""Extract first 500 words from pattern's system.md file"""
system_md_path = os.path.join(pattern_path, "system.md")
with open(system_md_path, 'r', encoding='utf-8') as f:
content = ' '.join(f.read().split()[:500])
with open(system_md_path, "r", encoding="utf-8") as f:
content = " ".join(f.read().split()[:500])
return content
def extract_pattern_info():
"""Extract pattern information from the patterns directory"""
script_dir = os.path.dirname(os.path.abspath(__file__))
patterns_dir = os.path.expanduser("~/.config/fabric/patterns")
print(f"\nScanning patterns directory: {patterns_dir}")
extracts_path = os.path.join(script_dir, "pattern_extracts.json")
descriptions_path = os.path.join(script_dir, "pattern_descriptions.json")
existing_extracts = load_existing_file(extracts_path)
existing_descriptions = load_existing_file(descriptions_path)
existing_extract_names = {p["patternName"] for p in existing_extracts["patterns"]}
existing_description_names = {p["patternName"] for p in existing_descriptions["patterns"]}
existing_description_names = {
p["patternName"] for p in existing_descriptions["patterns"]
}
print(f"Found existing patterns: {len(existing_extract_names)}")
new_extracts = []
new_descriptions = []
for dirname in sorted(os.listdir(patterns_dir)):
# Only log new pattern processing
if dirname not in existing_extract_names:
print(f"Processing new pattern: {dirname}")
pattern_path = os.path.join(patterns_dir, dirname)
system_md_path = os.path.join(pattern_path, "system.md")
print(f"Checking system.md at: {system_md_path}")
if os.path.isdir(pattern_path) and os.path.exists(system_md_path):
print(f"Valid pattern directory found: {dirname}")
if dirname not in existing_extract_names:
print(f"Processing new pattern: {dirname}")
try:
if dirname not in existing_extract_names:
print(f"Creating new extract for: {dirname}")
pattern_extract = get_pattern_extract(pattern_path) # Pass directory path
new_extracts.append({
"patternName": dirname,
"pattern_extract": pattern_extract
})
pattern_extract = get_pattern_extract(
pattern_path
) # Pass directory path
new_extracts.append(
{"patternName": dirname, "pattern_extract": pattern_extract}
)
if dirname not in existing_description_names:
print(f"Creating new description for: {dirname}")
new_descriptions.append({
"patternName": dirname,
"description": "[Description pending]",
"tags": []
})
except Exception as e:
new_descriptions.append(
{
"patternName": dirname,
"description": "[Description pending]",
"tags": [],
}
)
except OSError as e:
print(f"Error processing {dirname}: {str(e)}")
else:
print(f"Invalid pattern directory or missing system.md: {dirname}")
print(f"\nProcessing summary:")
print("\nProcessing summary:")
print(f"New extracts created: {len(new_extracts)}")
print(f"New descriptions added: {len(new_descriptions)}")
existing_extracts["patterns"].extend(new_extracts)
existing_descriptions["patterns"].extend(new_descriptions)
return existing_extracts, existing_descriptions, len(new_descriptions)
@@ -87,28 +102,29 @@ def update_web_static(descriptions_path):
static_path = os.path.join(static_dir, "pattern_descriptions.json")
shutil.copy2(descriptions_path, static_path)
def save_pattern_files():
"""Save both pattern files and sync to web"""
script_dir = os.path.dirname(os.path.abspath(__file__))
extracts_path = os.path.join(script_dir, "pattern_extracts.json")
descriptions_path = os.path.join(script_dir, "pattern_descriptions.json")
pattern_extracts, pattern_descriptions, new_count = extract_pattern_info()
# Save files
with open(extracts_path, 'w', encoding='utf-8') as f:
with open(extracts_path, "w", encoding="utf-8") as f:
json.dump(pattern_extracts, f, indent=2, ensure_ascii=False)
with open(descriptions_path, 'w', encoding='utf-8') as f:
with open(descriptions_path, "w", encoding="utf-8") as f:
json.dump(pattern_descriptions, f, indent=2, ensure_ascii=False)
# Update web static
update_web_static(descriptions_path)
print(f"\nProcessing complete:")
print("\nProcessing complete:")
print(f"Total patterns: {len(pattern_descriptions['patterns'])}")
print(f"New patterns added: {new_count}")
if __name__ == "__main__":
save_pattern_files()

View File

@@ -1634,8 +1634,8 @@
]
},
{
"patternName": "write_essay",
"description": "Create essays with thesis statements and arguments.",
"patternName": "write_essay_pg",
"description": "Create essays with thesis statements and arguments in the style of Paul Graham.",
"tags": [
"WRITING",
"RESEARCH",
@@ -1703,22 +1703,22 @@
{
"patternName": "analyze_bill",
"description": "Analyze a legislative bill and implications.",
"tags": [
"tags": [
"ANALYSIS",
"BILL"
]
},
{
"patternName": "analyze_bill_short",
"description": "Consended - Analyze a legislative bill and implications.",
"tags": [
"description": "Condensed - Analyze a legislative bill and implications.",
"tags": [
"ANALYSIS",
"BILL"
]
},
{
"patternName": "create_coding_feature",
"description": "[Description pending]",
"description": "Generate secure and composable code features using latest technology and best practices.",
"tags": [
"DEVELOPMENT"
]
@@ -1774,6 +1774,76 @@
"tags": [
"SUMMARIZE"
]
},
{
"patternName": "analyze_paper_simple",
"description": "Analyze research papers to determine primary findings and assess scientific rigor.",
"tags": [
"ANALYSIS",
"RESEARCH",
"WRITING"
]
},
{
"patternName": "analyze_terraform_plan",
"description": "Analyze Terraform plans for infrastructure changes, security risks, and cost implications.",
"tags": [
"ANALYSIS",
"DEVOPS"
]
},
{
"patternName": "create_mnemonic_phrases",
"description": "Create memorable mnemonic sentences using given words in exact order for memory aids.",
"tags": [
"CREATIVITY",
"LEARNING"
]
},
{
"patternName": "summarize_board_meeting",
"description": "Convert board meeting transcripts into formal meeting notes for corporate records.",
"tags": [
"ANALYSIS",
"BUSINESS"
]
},
{
"patternName": "write_essay",
"description": "Write essays on given topics in the distinctive style of specified authors.",
"tags": [
"WRITING",
"CREATIVITY"
]
},
{
"patternName": "extract_alpha",
"description": "Extracts the most novel and surprising ideas (\"alpha\") from content, inspired by information theory.",
"tags": [
"EXTRACT",
"ANALYSIS",
"CR THINKING",
"WISDOM"
]
},
{
"patternName": "extract_mcp_servers",
"description": "Analyzes content to identify and extract detailed information about Model Context Protocol (MCP) servers.",
"tags": [
"ANALYSIS",
"EXTRACT",
"DEVELOPMENT",
"AI"
]
},
{
"patternName": "review_code",
"description": "Performs a comprehensive code review, providing detailed feedback on correctness, security, and performance.",
"tags": [
"DEVELOPMENT",
"REVIEW",
"SECURITY"
]
}
]
}

File diff suppressed because one or more lines are too long

207
README.md
View File

@@ -12,16 +12,17 @@ Fabric is graciously supported by…
![GitHub top language](https://img.shields.io/github/languages/top/danielmiessler/fabric)
![GitHub last commit](https://img.shields.io/github/last-commit/danielmiessler/fabric)
[![License: MIT](https://img.shields.io/badge/License-MIT-green.svg)](https://opensource.org/licenses/MIT)
[![Ask DeepWiki](https://deepwiki.com/badge.svg)](https://deepwiki.com/danielmiessler/fabric)
<p class="align center">
<div align="center">
<h4><code>fabric</code> is an open-source framework for augmenting humans using AI.</h4>
</p>
</div>
[Updates](#updates) •
[What and Why](#what-and-why) •
[Philosophy](#philosophy) •
[Installation](#Installation) •
[Usage](#Usage) •
[Installation](#installation) •
[Usage](#usage) •
[Examples](#examples) •
[Just Use the Patterns](#just-use-the-patterns) •
[Custom Patterns](#custom-patterns) •
@@ -32,13 +33,36 @@ Fabric is graciously supported by…
</div>
## What and why
Since the start of modern AI in late 2022 we've seen an **_extraordinary_** number of AI applications for accomplishing tasks. There are thousands of websites, chat-bots, mobile apps, and other interfaces for using all the different AI out there.
It's all really exciting and powerful, but _it's not easy to integrate this functionality into our lives._
<div class="align center">
<h4>In other words, AI doesn't have a capabilities problem—it has an <em>integration</em> problem.</h4>
</div>
**Fabric was created to address this by creating and organizing the fundamental units of AI—the prompts themselves!**
Fabric organizes prompts by real-world task, allowing people to create, collect, and organize their most important AI solutions in a single place for use in their favorite tools. And if you're command-line focused, you can use Fabric itself as the interface!
## Intro videos
Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.
- [Network Chuck](https://www.youtube.com/watch?v=UbDyjIIGaxQ)
- [David Bombal](https://www.youtube.com/watch?v=vF-MQmVxnCs)
- [My Own Intro to the Tool](https://www.youtube.com/watch?v=wPEyyigh10g)
- [More Fabric YouTube Videos](https://www.youtube.com/results?search_query=fabric+ai)
## Navigation
- [`fabric`](#fabric)
- [Navigation](#navigation)
- [Updates](#updates)
- [What and why](#what-and-why)
- [Intro videos](#intro-videos)
- [Navigation](#navigation)
- [Updates](#updates)
- [Philosophy](#philosophy)
- [Breaking problems into components](#breaking-problems-into-components)
- [Too many prompts](#too-many-prompts)
@@ -59,12 +83,19 @@ Fabric is graciously supported by…
- [Save your files in markdown using aliases](#save-your-files-in-markdown-using-aliases)
- [Migration](#migration)
- [Upgrading](#upgrading)
- [Shell Completions](#shell-completions)
- [Zsh Completion](#zsh-completion)
- [Bash Completion](#bash-completion)
- [Fish Completion](#fish-completion)
- [Usage](#usage)
- [Our approach to prompting](#our-approach-to-prompting)
- [Examples](#examples)
- [Just use the Patterns](#just-use-the-patterns)
- [Prompt Strategies](#prompt-strategies)
- [Custom Patterns](#custom-patterns)
- [Setting Up Custom Patterns](#setting-up-custom-patterns)
- [Using Custom Patterns](#using-custom-patterns)
- [How It Works](#how-it-works)
- [Helper Apps](#helper-apps)
- [`to_pdf`](#to_pdf)
- [`to_pdf` Installation](#to_pdf-installation)
@@ -83,28 +114,29 @@ Fabric is graciously supported by…
## Updates
> [!NOTE]
> April 16, 2025
>
> - Fabric now supports Grok (from XAI)! Update and use `-S` to select it as your default if you want, or just use the shortcut `-m grok-3-beta`. Enjoy!
## What and why
Since the start of 2023 and GenAI we've seen a massive number of AI applications for accomplishing tasks. It's powerful, but _it's not easy to integrate this functionality into our lives._
<div align="center">
<h4>In other words, AI doesn't have a capabilities problem—it has an <em>integration</em> problem.</h4>
</div>
Fabric was created to address this by enabling everyone to granularly apply AI to everyday challenges.
## Intro videos
Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.
- [Network Chuck](https://www.youtube.com/watch?v=UbDyjIIGaxQ)
- [David Bombal](https://www.youtube.com/watch?v=vF-MQmVxnCs)
- [My Own Intro to the Tool](https://www.youtube.com/watch?v=wPEyyigh10g)
- [More Fabric YouTube Videos](https://www.youtube.com/results?search_query=fabric+ai)
> July 4, 2025
>
> - **Web Search**: Fabric now supports web search for Anthropic and OpenAI models using the `--search` and `--search-location` flags. This replaces the previous plugin-based search, so you may want to remove the old `ANTHROPIC_WEB_SEARCH_TOOL_*` variables from your `~/.config/fabric/.env` file.
> - **Image Generation**: Fabric now has powerful image generation capabilities with OpenAI.
> - Generate images from text prompts and save them using `--image-file`.
> - Edit existing images by providing an input image with `--attachment`.
> - Control image `size`, `quality`, `compression`, and `background` with the new `--image-*` flags.
>
>June 17, 2025
>
>- Fabric now supports Perplexity AI. Configure it by using `fabric -S` to add your Perplexity AI API Key,
> and then try:
>
> ```bash
> fabric -m sonar-pro "What is the latest world news?"
> ```
>
>June 11, 2025
>
>- Fabric's YouTube transcription now needs `yt-dlp` to be installed. Make sure to install the latest
> version (2025.06.09 as of this note). The YouTube API key is only needed for comments (the `--comments` flag)
> and metadata extraction (the `--metadata` flag).
## Philosophy
@@ -411,6 +443,48 @@ The great thing about Go is that it's super easy to upgrade. Just run the same c
go install github.com/danielmiessler/fabric@latest
```
### Shell Completions
Fabric provides shell completion scripts for Zsh, Bash, and Fish
shells, making it easier to use the CLI by providing tab completion
for commands and options.
#### Zsh Completion
To enable Zsh completion:
```bash
# Copy the completion file to a directory in your $fpath
mkdir -p ~/.zsh/completions
cp completions/_fabric ~/.zsh/completions/
# Add the directory to fpath in your .zshrc before compinit
echo 'fpath=(~/.zsh/completions $fpath)' >> ~/.zshrc
echo 'autoload -Uz compinit && compinit' >> ~/.zshrc
```
#### Bash Completion
To enable Bash completion:
```bash
# Source the completion script in your .bashrc
echo 'source /path/to/fabric/completions/fabric.bash' >> ~/.bashrc
# Or copy to the system-wide bash completion directory
sudo cp completions/fabric.bash /etc/bash_completion.d/
```
#### Fish Completion
To enable Fish completion:
```bash
# Copy the completion file to the fish completions directory
mkdir -p ~/.config/fish/completions
cp completions/fabric.fish ~/.config/fish/completions/
```
## Usage
Once you have it all set up, here's how to use it.
@@ -419,8 +493,7 @@ Once you have it all set up, here's how to use it.
fabric -h
```
```bash
```plaintext
Usage:
fabric [OPTIONS]
@@ -435,7 +508,9 @@ Application Options:
-T, --topp= Set top P (default: 0.9)
-s, --stream Stream
-P, --presencepenalty= Set presence penalty (default: 0.0)
-r, --raw Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns.
-r, --raw Use the defaults of the model without sending chat options (like
temperature etc.) and use the user role instead of the system role for
patterns.
-F, --frequencypenalty= Set frequency penalty (default: 0.0)
-l, --listpatterns List all patterns
-L, --listmodels List all available models
@@ -449,9 +524,12 @@ Application Options:
--output-session Output the entire session (also a temporary one) to the output file
-n, --latest= Number of latest patterns to list (default: 0)
-d, --changeDefaultModel Change default model
-y, --youtube= YouTube video or play list "URL" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file
-y, --youtube= YouTube video or play list "URL" to grab transcript, comments from it
and send to chat or print it put to the console and store it in the
output file
--playlist Prefer playlist over video if both ids are present in the URL
--transcript Grab transcript from YouTube video and send to chat (it is used per default).
--transcript Grab transcript from YouTube video and send to chat (it is used per
default).
--transcript-with-timestamps Grab transcript from YouTube video with timestamps and send to chat
--comments Grab comments from YouTube video and send to chat
--metadata Output video metadata
@@ -469,6 +547,7 @@ Application Options:
--serve Serve the Fabric Rest API
--serveOllama Serve the Fabric Rest API with ollama endpoints
--address= The address to bind the REST API (default: :8080)
--api-key= API key used to secure server routes
--config= Path to YAML config file
--version Print current version
--listextensions List all registered extensions
@@ -477,6 +556,15 @@ Application Options:
--strategy= Choose a strategy from the available strategies
--liststrategies List all strategies
--listvendors List all vendors
--shell-complete-list Output raw list without headers/formatting (for shell completion)
--search Enable web search tool for supported models (Anthropic, OpenAI)
--search-location= Set location for web search results (e.g., 'America/Los_Angeles')
--image-file= Save generated image to specified file path (e.g., 'output.png')
--image-size= Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)
--image-quality= Image quality: low, medium, high, auto (default: auto)
--image-compression= Compression level 0-100 for JPEG/WebP formats (default: not set)
--image-background= Background type: opaque, transparent (default: opaque, only for
PNG/WebP)
Help Options:
-h, --help Show this help message
@@ -567,11 +655,48 @@ Use `fabric -S` and select the option to install the strategies in your `~/.conf
You may want to use Fabric to create your own custom Patterns—but not share them with others. No problem!
Just make a directory in `~/.config/custompatterns/` (or wherever) and put your `.md` files in there.
Fabric now supports a dedicated custom patterns directory that keeps your personal patterns separate from the built-in ones. This means your custom patterns won't be overwritten when you update Fabric's built-in patterns.
When you're ready to use them, copy them into `~/.config/fabric/patterns/`
### Setting Up Custom Patterns
You can then use them like any other Patterns, but they won't be public unless you explicitly submit them as Pull Requests to the Fabric project. So don't worry—they're private to you.
1. Run the Fabric setup:
```bash
fabric --setup
```
2. Select the "Custom Patterns" option from the Tools menu and enter your desired directory path (e.g., `~/my-custom-patterns`)
3. Fabric will automatically create the directory if it does not exist.
### Using Custom Patterns
1. Create your custom pattern directory structure:
```bash
mkdir -p ~/my-custom-patterns/my-analyzer
```
2. Create your pattern file
```bash
echo "You are an expert analyzer of ..." > ~/my-custom-patterns/my-analyzer/system.md
```
3. **Use your custom pattern:**
```bash
fabric --pattern my-analyzer "analyze this text"
```
### How It Works
- **Priority System**: Custom patterns take precedence over built-in patterns with the same name
- **Seamless Integration**: Custom patterns appear in `fabric --listpatterns` alongside built-in ones
- **Update Safe**: Your custom patterns are never affected by `fabric --updatepatterns`
- **Private by Default**: Custom patterns remain private unless you explicitly share them
Your custom patterns are completely private and won't be affected by Fabric updates!
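As a rough sketch of the priority system (using the directories from the setup steps above; the helper and lookup details are hypothetical, not Fabric's actual code):

```go
package main

import (
	"fmt"
	"os"
	"path/filepath"
)

// resolvePattern consults the custom patterns directory before the
// built-in one, so a custom pattern with the same name takes
// precedence.
func resolvePattern(name, customDir, builtinDir string) (string, error) {
	for _, dir := range []string{customDir, builtinDir} {
		p := filepath.Join(dir, name, "system.md")
		if _, err := os.Stat(p); err == nil {
			return p, nil
		}
	}
	return "", fmt.Errorf("pattern %q not found", name)
}

func main() {
	home, _ := os.UserHomeDir()
	p, err := resolvePattern("my-analyzer",
		filepath.Join(home, "my-custom-patterns"),
		filepath.Join(home, ".config", "fabric", "patterns"))
	fmt.Println(p, err)
}
```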
## Helper Apps
@@ -695,7 +820,7 @@ The Streamlit UI supports clipboard operations across different platforms:
- **macOS**: Uses `pbcopy` and `pbpaste` (built-in)
- **Windows**: Uses `pyperclip` library (install with `pip install pyperclip`)
- **Linux**: Uses `xclip` (install with `sudo apt-get install xclip` or equivalent for your distro)
- **Linux**: Uses `xclip` (install with `sudo apt-get install xclip` or equivalent for your Linux distribution)
## Meta
@@ -713,15 +838,15 @@ The Streamlit UI supports clipboard operations across different platforms:
### Primary contributors
<a href="https://github.com/danielmiessler"><img src="https://avatars.githubusercontent.com/u/50654?v=4" title="Daniel Miessler" width="50" height="50"></a>
<a href="https://github.com/xssdoctor"><img src="https://avatars.githubusercontent.com/u/9218431?v=4" title="Jonathan Dunn" width="50" height="50"></a>
<a href="https://github.com/sbehrens"><img src="https://avatars.githubusercontent.com/u/688589?v=4" title="Scott Behrens" width="50" height="50"></a>
<a href="https://github.com/agu3rra"><img src="https://avatars.githubusercontent.com/u/10410523?v=4" title="Andre Guerra" width="50" height="50"></a>
<a href="https://github.com/danielmiessler"><img src="https://avatars.githubusercontent.com/u/50654?v=4" title="Daniel Miessler" width="50" height="50" alt="Daniel Miessler"></a>
<a href="https://github.com/xssdoctor"><img src="https://avatars.githubusercontent.com/u/9218431?v=4" title="Jonathan Dunn" width="50" height="50" alt="Jonathan Dunn"></a>
<a href="https://github.com/sbehrens"><img src="https://avatars.githubusercontent.com/u/688589?v=4" title="Scott Behrens" width="50" height="50" alt="Scott Behrens"></a>
<a href="https://github.com/agu3rra"><img src="https://avatars.githubusercontent.com/u/10410523?v=4" title="Andre Guerra" width="50" height="50" alt="Andre Guerra"></a>
### Contributors
<a href="https://github.com/danielmiessler/fabric/graphs/contributors">
<img src="https://contrib.rocks/image?repo=danielmiessler/fabric" />
<img src="https://contrib.rocks/image?repo=danielmiessler/fabric" alt="contrib.rocks" />
</a>
Made with [contrib.rocks](https://contrib.rocks).

View File

@@ -1,295 +0,0 @@
This cumulative PR adds several Web UI and functionality improvements that make pattern selection more intuitive: pattern descriptions, the ability to save favorite patterns, a pattern tag system, powerful multilingual capabilities, PDF-to-markdown functionality, a help reference section, more robust YouTube processing, and a variety of other UI improvements.
## 🎥 Demo Video
https://youtu.be/XMzjgqvdltM
## 🌟 Key Features
### 1. Web UI and Pattern Selection Improvements
- Pattern Descriptions
- Pattern Tags
- Pattern Favourites
- Pattern Search bar
- PDF to markdown (pdf as pattern input)
- Better handling of YouTube URLs
- Multilingual Support
- Web UI refinements for clearer interaction
- Help section via modal
### 2. Multilingual Support System
- Seamless language switching via UI dropdown
- Persistent language state management
- Pattern processing now uses the selected language seamlessly
### 3. YouTube Integration Enhancement
- Robust language handling for YouTube transcript processing
- Chunk-based language maintenance for long transcripts
- Consistent language output throughout transcript analysis
### 4. Enhanced Tag Management Integration
The tag filtering system has been deeply integrated into the Pattern Selection interface through several UI enhancements:
1. **Dual-Position Tag Panel**
- Sliding panel positioned to the right of pattern modal
- Dynamic toggle button that adapts position and text based on panel state
- Smooth transitions for opening/closing animations
2. **Tag Selection Visibility**
- New dedicated tag display section in pattern modal
- Visual separation through subtle background styling
- Immediate feedback showing selected tags with comma separation
- Inline reset capability for quick tag clearing
3. **Improved User Experience**
- Clear visual hierarchy between pattern list and tag filtering
- Multiple ways to manage tags (panel or quick reset)
- Consistent styling with existing design language
- Space-efficient tag brick layout in 3-column grid
4. **Technical Implementation**
- Reactive tag state management
- Efficient tag filtering logic
- Proper event dispatching between components
- Maintained accessibility standards
- Responsive design considerations
5. **PDF to Markdown conversion functionality for the web interface**
- Automatic detection and processing of PDF files in chat
- Conversion to markdown format for LLM processing
- Installation instructions from the pdf-to-markdown repository
The PDF conversion module has been integrated into the Svelte web interface. Once installed, it automatically detects PDF files in the chat interface and converts them to markdown.
## HOW TO INSTALL PDF-TO-MARKDOWN
If you need to update the web component, follow the instructions in "Web Interface MOD Readme Files/WEB V2 Install Guide.md".
Assuming your web install is up to date and your web Svelte config is complete, you can simply follow these steps to add pdf-to-markdown:
```bash
# FROM FABRIC ROOT DIRECTORY
cd web

# Install in this sequence:
# Step 1
npm install -D patch-package

# Step 2
npm install -D pdfjs-dist@2.5.207

# Step 3
npm install -D github:jzillmann/pdf-to-markdown#modularize
```
These enhancements create a more intuitive and efficient pattern discovery experience, allowing users to quickly filter and find relevant patterns while maintaining a clean, modern interface.
## 🛠 Technical Implementation
### Language Support Architecture
```typescript
// Language state management
export const languageStore = writable<string>('');
// Chat input language detection
if (qualifier === 'fr') {
languageStore.set('fr');
userInput = userInput.replace(/--fr\s*/, '');
}
// Service layer integration
const language = get(languageStore) || 'en';
const languageInstruction = language !== 'en'
? `. Please use the language '${language}' for the output.`
: '';
```
### YouTube Processing Enhancement
```typescript
// Process stream with language instruction per chunk
await chatService.processStream(
stream,
(content: string, response?: StreamResponse) => {
if (currentLanguage !== 'en') {
content = `${content}. Please use the language '${currentLanguage}' for the output.`;
}
// Update messages...
}
);
```
# Pattern Descriptions and Tags Management
This document explains the complete workflow for managing pattern descriptions and tags, including how to process new patterns and maintain metadata.
## System Overview
The pattern system follows this hierarchy:
1. `~/.config/fabric/patterns/` directory: The source of truth for available patterns
2. `pattern_extracts.json`: Contains first 500 words of each pattern for reference
3. `pattern_descriptions.json`: Stores pattern metadata (descriptions and tags)
4. `web/static/data/pattern_descriptions.json`: Web-accessible copy for the interface
## Pattern Processing Workflow
### 1. Adding New Patterns
- Add patterns to `~/.config/fabric/patterns/`
- Run extract_patterns.py to process new additions:
```bash
python extract_patterns.py
```
The Python script automatically:
- Creates pattern extracts for reference
- Adds placeholder entries in descriptions file
- Syncs to web interface
### 2. Pattern Extract Creation
The script extracts the first 500 words from each pattern's system.md file to:
- Provide context for writing descriptions
- Maintain reference material
- Aid in pattern categorization
### 3. Description and Tag Management
Pattern descriptions and tags are managed in pattern_descriptions.json:
```json
{
  "patterns": [
    {
      "patternName": "pattern_name",
      "description": "[Description pending]",
      "tags": []
    }
  ]
}
```
## Completing Pattern Metadata
### Writing Descriptions
1. Check pattern_descriptions.json for "[Description pending]" entries
2. Reference pattern_extracts.json for context
3. Update each pattern's short description (one sentence).
You can update descriptions in pattern_descriptions.json manually or with LLM assistance (the preferred approach).
Tell the LLM to look for "[Description pending]" entries in this file and write a short description based on the extract info in the pattern_extracts.json file. You can also ask your LLM to add tags for newly added patterns, using other patterns' tag assignments as examples.
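For example, a completed entry (reusing the hypothetical `my-analyzer` pattern from the custom-patterns section) might look like:
```json
{
  "patternName": "my-analyzer",
  "description": "Analyzes a text and reports its key claims and weaknesses.",
  "tags": ["ANALYSIS", "WRITING"]
}
```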
### Managing Tags
1. Add appropriate tags to new patterns
2. Update existing tags as needed
3. Tags are stored as arrays: ["TAG1", "TAG2"]
4. Edit pattern_descriptions.json directly to modify tags
5. Make tags your own. You can delete, replace, or amend existing tags.
## File Synchronization
The script maintains synchronization between:
- Local pattern_descriptions.json
- Web interface copy in static/data/

No manual file copying is needed.
## Best Practices
1. Run extract_patterns.py when:
- Adding new patterns
- Updating existing patterns
- Modifying pattern structure
2. Description Writing:
- Use pattern extracts for context
- Keep descriptions clear and concise
- Focus on pattern purpose and usage
3. Tag Management:
- Use consistent tag categories
- Apply multiple tags when relevant
- Update tags to reflect pattern evolution
## Troubleshooting
If patterns are not showing in the web interface:
1. Verify pattern_descriptions.json format
2. Check web static copy exists
3. Ensure proper file permissions
4. Run extract_patterns.py to resync
## File Structure
```
fabric/
├── patterns/                        # Pattern source files
├── PATTERN_DESCRIPTIONS/
│   ├── extract_patterns.py          # Pattern processing script
│   ├── pattern_extracts.json        # Pattern content references
│   └── pattern_descriptions.json    # Pattern metadata
└── web/
    └── static/
        └── data/
            └── pattern_descriptions.json  # Web interface copy
```
## 🎯 Usage Examples
### 1. Using Language Qualifiers
```
User: What is the weather?
AI: The weather information...
User: --fr What is the weather?
AI: Voici les informations météo...
```
### 2. Global Settings
1. Select language from dropdown
2. All interactions use selected language
3. Automatic reset to English after each message
### 3. YouTube Analysis
```
User: Analyze this YouTube video --fr
AI: [Provides analysis in French, maintaining language throughout the transcript]
```
## 💡 Key Benefits
1. **Enhanced User Experience**
- Intuitive language switching
- Consistent language handling
- Seamless integration with existing features
2. **Robust Implementation**
- Simple yet powerful design
- No complex language detection needed
- Direct AI instruction approach
3. **Maintainable Architecture**
- Clean separation of concerns
- Stateful language management
- Easy to extend for new languages
4. **YouTube Integration**
- Handles long transcripts effectively
- Maintains language consistency
- Robust chunk processing
## 🔄 Implementation Notes
1. **State Management**
- Language persists until changed
- Resets to English after each message
- Handles UI state updates efficiently
2. **Error Handling**
- Invalid qualifiers are ignored
- Unknown languages default to English
- Proper store reset on errors
3. **Best Practices**
- Clear language instructions
- Consistent state management
- Robust error handling

132
chat/chat.go Normal file
View File

@@ -0,0 +1,132 @@
package chat
import (
"encoding/json"
"errors"
)
const (
ChatMessageRoleSystem = "system"
ChatMessageRoleUser = "user"
ChatMessageRoleAssistant = "assistant"
ChatMessageRoleFunction = "function"
ChatMessageRoleTool = "tool"
ChatMessageRoleDeveloper = "developer"
)
var ErrContentFieldsMisused = errors.New("can't use both Content and MultiContent properties simultaneously")
type ChatMessagePartType string
const (
ChatMessagePartTypeText ChatMessagePartType = "text"
ChatMessagePartTypeImageURL ChatMessagePartType = "image_url"
)
type ChatMessageImageURL struct {
URL string `json:"url,omitempty"`
}
type ChatMessagePart struct {
Type ChatMessagePartType `json:"type,omitempty"`
Text string `json:"text,omitempty"`
ImageURL *ChatMessageImageURL `json:"image_url,omitempty"`
}
type FunctionCall struct {
Name string `json:"name,omitempty"`
Arguments string `json:"arguments,omitempty"`
}
type ToolType string
const (
ToolTypeFunction ToolType = "function"
)
type ToolCall struct {
Index *int `json:"index,omitempty"`
ID string `json:"id,omitempty"`
Type ToolType `json:"type"`
Function FunctionCall `json:"function"`
}
type ChatCompletionMessage struct {
Role string `json:"role"`
Content string `json:"content,omitempty"`
Refusal string `json:"refusal,omitempty"`
MultiContent []ChatMessagePart `json:"-"`
Name string `json:"name,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}
// MarshalJSON serializes Content as a plain string "content" field, or
// MultiContent as a part array, and rejects messages that set both.
func (m ChatCompletionMessage) MarshalJSON() ([]byte, error) {
if m.Content != "" && m.MultiContent != nil {
return nil, ErrContentFieldsMisused
}
if len(m.MultiContent) > 0 {
msg := struct {
Role string `json:"role"`
Content string `json:"-"`
Refusal string `json:"refusal,omitempty"`
MultiContent []ChatMessagePart `json:"content,omitempty"`
Name string `json:"name,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}(m)
return json.Marshal(msg)
}
msg := struct {
Role string `json:"role"`
Content string `json:"content,omitempty"`
Refusal string `json:"refusal,omitempty"`
MultiContent []ChatMessagePart `json:"-"`
Name string `json:"name,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}(m)
return json.Marshal(msg)
}
// UnmarshalJSON first tries the plain string content form, then falls back
// to the multi-part content form.
func (m *ChatCompletionMessage) UnmarshalJSON(bs []byte) error {
msg := struct {
Role string `json:"role"`
Content string `json:"content"`
Refusal string `json:"refusal,omitempty"`
MultiContent []ChatMessagePart
Name string `json:"name,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}{}
if err := json.Unmarshal(bs, &msg); err == nil {
*m = ChatCompletionMessage(msg)
return nil
}
multiMsg := struct {
Role string `json:"role"`
Content string
Refusal string `json:"refusal,omitempty"`
MultiContent []ChatMessagePart `json:"content"`
Name string `json:"name,omitempty"`
ReasoningContent string `json:"reasoning_content,omitempty"`
FunctionCall *FunctionCall `json:"function_call,omitempty"`
ToolCalls []ToolCall `json:"tool_calls,omitempty"`
ToolCallID string `json:"tool_call_id,omitempty"`
}{}
if err := json.Unmarshal(bs, &multiMsg); err != nil {
return err
}
*m = ChatCompletionMessage(multiMsg)
return nil
}
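A minimal sketch of how this dual representation behaves; the `github.com/danielmiessler/fabric/chat` import path matches the imports used elsewhere in this PR:
```go
package main

import (
	"encoding/json"
	"fmt"

	"github.com/danielmiessler/fabric/chat"
)

func main() {
	// Plain text message: Content marshals to a string "content" field.
	text := chat.ChatCompletionMessage{
		Role:    chat.ChatMessageRoleUser,
		Content: "Hello",
	}
	b, _ := json.Marshal(text)
	fmt.Println(string(b)) // {"role":"user","content":"Hello"}

	// Multi-part message: MultiContent marshals to an array "content" field.
	multi := chat.ChatCompletionMessage{
		Role: chat.ChatMessageRoleUser,
		MultiContent: []chat.ChatMessagePart{
			{Type: chat.ChatMessagePartTypeText, Text: "Describe this image"},
			{Type: chat.ChatMessagePartTypeImageURL,
				ImageURL: &chat.ChatMessageImageURL{URL: "data:image/png;base64,..."}},
		},
	}
	b, _ = json.Marshal(multi)
	fmt.Println(string(b))

	// Setting both fields is rejected by MarshalJSON.
	bad := chat.ChatCompletionMessage{
		Role:         chat.ChatMessageRoleUser,
		Content:      "x",
		MultiContent: []chat.ChatMessagePart{{Type: chat.ChatMessagePartTypeText, Text: "y"}},
	}
	if _, err := json.Marshal(bad); err != nil {
		fmt.Println("error:", err) // can't use both Content and MultiContent
	}
}
```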

View File

@@ -93,7 +93,7 @@ func Cli(version string) (err error) {
}
if currentFlags.ListPatterns {
err = fabricDb.Patterns.ListNames()
err = fabricDb.Patterns.ListNames(currentFlags.ShellCompleteOutput)
return
}
@@ -102,17 +102,17 @@ func Cli(version string) (err error) {
if models, err = registry.VendorManager.GetModels(); err != nil {
return
}
models.Print()
models.Print(currentFlags.ShellCompleteOutput)
return
}
if currentFlags.ListAllContexts {
err = fabricDb.Contexts.ListNames()
err = fabricDb.Contexts.ListNames(currentFlags.ShellCompleteOutput)
return
}
if currentFlags.ListAllSessions {
err = fabricDb.Sessions.ListNames()
err = fabricDb.Sessions.ListNames(currentFlags.ShellCompleteOutput)
return
}
@@ -160,7 +160,7 @@ func Cli(version string) (err error) {
}
if currentFlags.ListStrategies {
err = registry.Strategies.ListStrategies()
err = registry.Strategies.ListStrategies(currentFlags.ShellCompleteOutput)
return
}
@@ -256,7 +256,8 @@ func Cli(version string) (err error) {
}
var chatter *core.Chatter
if chatter, err = registry.GetChatter(currentFlags.Model, currentFlags.ModelContextLength, currentFlags.Strategy, currentFlags.Stream, currentFlags.DryRun); err != nil {
if chatter, err = registry.GetChatter(currentFlags.Model, currentFlags.ModelContextLength,
currentFlags.Strategy, currentFlags.Stream, currentFlags.DryRun); err != nil {
return
}
@@ -269,7 +270,11 @@ func Cli(version string) (err error) {
if chatReq.Language == "" {
chatReq.Language = registry.Language.DefaultLanguage.Value
}
if session, err = chatter.Send(chatReq, currentFlags.BuildChatOptions()); err != nil {
var chatOptions *common.ChatOptions
if chatOptions, err = currentFlags.BuildChatOptions(); err != nil {
return
}
if session, err = chatter.Send(chatReq, chatOptions); err != nil {
return
}

View File

@@ -6,15 +6,16 @@ import (
"fmt"
"io"
"os"
"path/filepath"
"reflect"
"strconv"
"strings"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/jessevdk/go-flags"
goopenai "github.com/sashabaranov/go-openai"
"golang.org/x/text/language"
"gopkg.in/yaml.v2"
"gopkg.in/yaml.v3"
)
// Flags holds the CLI flags; the user's flags are parsed into this struct, which is then passed to the chat logic in cli
@@ -73,6 +74,14 @@ type Flags struct {
Strategy string `long:"strategy" description:"Choose a strategy from the available strategies" default:""`
ListStrategies bool `long:"liststrategies" description:"List all strategies"`
ListVendors bool `long:"listvendors" description:"List all vendors"`
ShellCompleteOutput bool `long:"shell-complete-list" description:"Output raw list without headers/formatting (for shell completion)"`
Search bool `long:"search" description:"Enable web search tool for supported models (Anthropic, OpenAI)"`
SearchLocation string `long:"search-location" description:"Set location for web search results (e.g., 'America/Los_Angeles')"`
ImageFile string `long:"image-file" description:"Save generated image to specified file path (e.g., 'output.png')"`
ImageSize string `long:"image-size" description:"Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)"`
ImageQuality string `long:"image-quality" description:"Image quality: low, medium, high, auto (default: auto)"`
ImageCompression int `long:"image-compression" description:"Compression level 0-100 for JPEG/WebP formats (default: not set)"`
ImageBackground string `long:"image-background" description:"Background type: opaque, transparent (default: opaque, only for PNG/WebP)"`
}
var debug = false
@@ -253,8 +262,121 @@ func readStdin() (ret string, err error) {
return
}
func (o *Flags) BuildChatOptions() (ret *common.ChatOptions) {
// validateImageFile validates the image file path and extension
func validateImageFile(imagePath string) error {
if imagePath == "" {
return nil // No validation needed if no image file specified
}
// Check if file already exists
if _, err := os.Stat(imagePath); err == nil {
return fmt.Errorf("image file already exists: %s", imagePath)
}
// Check file extension
ext := strings.ToLower(filepath.Ext(imagePath))
validExtensions := []string{".png", ".jpeg", ".jpg", ".webp"}
for _, validExt := range validExtensions {
if ext == validExt {
return nil // Valid extension found
}
}
return fmt.Errorf("invalid image file extension '%s'. Supported formats: .png, .jpeg, .jpg, .webp", ext)
}
// validateImageParameters validates image generation parameters
func validateImageParameters(imagePath, size, quality, background string, compression int) error {
if imagePath == "" {
// Check if any image parameters are specified without --image-file
if size != "" || quality != "" || background != "" || compression != 0 {
return fmt.Errorf("image parameters (--image-size, --image-quality, --image-background, --image-compression) can only be used with --image-file")
}
return nil
}
// Validate size
if size != "" {
validSizes := []string{"1024x1024", "1536x1024", "1024x1536", "auto"}
valid := false
for _, validSize := range validSizes {
if size == validSize {
valid = true
break
}
}
if !valid {
return fmt.Errorf("invalid image size '%s'. Supported sizes: 1024x1024, 1536x1024, 1024x1536, auto", size)
}
}
// Validate quality
if quality != "" {
validQualities := []string{"low", "medium", "high", "auto"}
valid := false
for _, validQuality := range validQualities {
if quality == validQuality {
valid = true
break
}
}
if !valid {
return fmt.Errorf("invalid image quality '%s'. Supported qualities: low, medium, high, auto", quality)
}
}
// Validate background
if background != "" {
validBackgrounds := []string{"opaque", "transparent"}
valid := false
for _, validBackground := range validBackgrounds {
if background == validBackground {
valid = true
break
}
}
if !valid {
return fmt.Errorf("invalid image background '%s'. Supported backgrounds: opaque, transparent", background)
}
}
// Get file format for format-specific validations
ext := strings.ToLower(filepath.Ext(imagePath))
// Validate compression (only for jpeg/webp)
if compression != 0 { // 0 means not set
if ext != ".jpg" && ext != ".jpeg" && ext != ".webp" {
return fmt.Errorf("image compression can only be used with JPEG and WebP formats, not %s", ext)
}
if compression < 0 || compression > 100 {
return fmt.Errorf("image compression must be between 0 and 100, got %d", compression)
}
}
// Validate background transparency (only for png/webp)
if background == "transparent" {
if ext != ".png" && ext != ".webp" {
return fmt.Errorf("transparent background can only be used with PNG and WebP formats, not %s", ext)
}
}
return nil
}
func (o *Flags) BuildChatOptions() (ret *common.ChatOptions, err error) {
// Validate image file if specified
if err = validateImageFile(o.ImageFile); err != nil {
return nil, err
}
// Validate image parameters
if err = validateImageParameters(o.ImageFile, o.ImageSize, o.ImageQuality, o.ImageBackground, o.ImageCompression); err != nil {
return nil, err
}
ret = &common.ChatOptions{
Model: o.Model,
Temperature: o.Temperature,
TopP: o.TopP,
PresencePenalty: o.PresencePenalty,
@@ -262,6 +384,13 @@ func (o *Flags) BuildChatOptions() (ret *common.ChatOptions) {
Raw: o.Raw,
Seed: o.Seed,
ModelContextLength: o.ModelContextLength,
Search: o.Search,
SearchLocation: o.SearchLocation,
ImageFile: o.ImageFile,
ImageSize: o.ImageSize,
ImageQuality: o.ImageQuality,
ImageCompression: o.ImageCompression,
ImageBackground: o.ImageBackground,
}
return
}
@@ -277,22 +406,15 @@ func (o *Flags) BuildChatRequest(Meta string) (ret *common.ChatRequest, err erro
Meta: Meta,
}
var message *goopenai.ChatCompletionMessage
if len(o.Attachments) == 0 {
if o.Message != "" {
message = &goopenai.ChatCompletionMessage{
Role: goopenai.ChatMessageRoleUser,
Content: strings.TrimSpace(o.Message),
}
}
} else {
message = &goopenai.ChatCompletionMessage{
Role: goopenai.ChatMessageRoleUser,
var message *chat.ChatCompletionMessage
if len(o.Attachments) > 0 {
message = &chat.ChatCompletionMessage{
Role: chat.ChatMessageRoleUser,
}
if o.Message != "" {
message.MultiContent = append(message.MultiContent, goopenai.ChatMessagePart{
Type: goopenai.ChatMessagePartTypeText,
message.MultiContent = append(message.MultiContent, chat.ChatMessagePart{
Type: chat.ChatMessagePartTypeText,
Text: strings.TrimSpace(o.Message),
})
}
@@ -315,14 +437,20 @@ func (o *Flags) BuildChatRequest(Meta string) (ret *common.ChatRequest, err erro
dataURL := fmt.Sprintf("data:%s;base64,%s", mimeType, base64Image)
url = &dataURL
}
message.MultiContent = append(message.MultiContent, goopenai.ChatMessagePart{
Type: goopenai.ChatMessagePartTypeImageURL,
ImageURL: &goopenai.ChatMessageImageURL{
message.MultiContent = append(message.MultiContent, chat.ChatMessagePart{
Type: chat.ChatMessagePartTypeImageURL,
ImageURL: &chat.ChatMessageImageURL{
URL: *url,
},
})
}
} else if o.Message != "" {
message = &chat.ChatCompletionMessage{
Role: chat.ChatMessageRoleUser,
Content: strings.TrimSpace(o.Message),
}
}
ret.Message = message
if o.Language != "" {

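To make the new validation rules concrete, here is a minimal sketch; the `github.com/danielmiessler/fabric/cli` import path is an assumption, since the hunks above do not name the package:
```go
package main

import (
	"fmt"

	"github.com/danielmiessler/fabric/cli" // assumed import path for the Flags struct
)

func main() {
	// Unsupported extension: validateImageFile only accepts
	// .png, .jpeg, .jpg, and .webp.
	bad := &cli.Flags{ImageFile: "out.gif"}
	if _, err := bad.BuildChatOptions(); err != nil {
		fmt.Println(err) // invalid image file extension '.gif'. Supported formats: ...
	}

	// Image parameters without --image-file are rejected as a group.
	orphan := &cli.Flags{ImageSize: "1024x1024"}
	if _, err := orphan.BuildChatOptions(); err != nil {
		fmt.Println(err)
	}

	// Valid: transparent background is allowed for PNG (and WebP) outputs.
	ok := &cli.Flags{ImageFile: "out.png", ImageBackground: "transparent"}
	if opts, err := ok.BuildChatOptions(); err == nil {
		fmt.Println(opts.ImageFile, opts.ImageBackground)
	}
}
```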
View File

@@ -4,6 +4,7 @@ import (
"bytes"
"io"
"os"
"path/filepath"
"strings"
"testing"
@@ -64,7 +65,8 @@ func TestBuildChatOptions(t *testing.T) {
Raw: false,
Seed: 1,
}
options := flags.BuildChatOptions()
options, err := flags.BuildChatOptions()
assert.NoError(t, err)
assert.Equal(t, expectedOptions, options)
}
@@ -84,7 +86,8 @@ func TestBuildChatOptionsDefaultSeed(t *testing.T) {
Raw: false,
Seed: 0,
}
options := flags.BuildChatOptions()
options, err := flags.BuildChatOptions()
assert.NoError(t, err)
assert.Equal(t, expectedOptions, options)
}
@@ -164,3 +167,269 @@ model: 123 # should be string
assert.Error(t, err)
})
}
func TestValidateImageFile(t *testing.T) {
t.Run("Empty path should be valid", func(t *testing.T) {
err := validateImageFile("")
assert.NoError(t, err)
})
t.Run("Valid extensions should pass", func(t *testing.T) {
validExtensions := []string{".png", ".jpeg", ".jpg", ".webp"}
for _, ext := range validExtensions {
filename := "/tmp/test" + ext
err := validateImageFile(filename)
assert.NoError(t, err, "Extension %s should be valid", ext)
}
})
t.Run("Invalid extensions should fail", func(t *testing.T) {
invalidExtensions := []string{".gif", ".bmp", ".tiff", ".svg", ".txt", ""}
for _, ext := range invalidExtensions {
filename := "/tmp/test" + ext
err := validateImageFile(filename)
assert.Error(t, err, "Extension %s should be invalid", ext)
assert.Contains(t, err.Error(), "invalid image file extension")
}
})
t.Run("Existing file should fail", func(t *testing.T) {
// Create a temporary file
tempFile, err := os.CreateTemp("", "test*.png")
assert.NoError(t, err)
defer os.Remove(tempFile.Name())
tempFile.Close()
// Validation should fail because file exists
err = validateImageFile(tempFile.Name())
assert.Error(t, err)
assert.Contains(t, err.Error(), "image file already exists")
})
t.Run("Non-existing file with valid extension should pass", func(t *testing.T) {
nonExistentFile := filepath.Join(os.TempDir(), "non_existent_file.png")
// Make sure the file doesn't exist
os.Remove(nonExistentFile)
err := validateImageFile(nonExistentFile)
assert.NoError(t, err)
})
}
func TestBuildChatOptionsWithImageFileValidation(t *testing.T) {
t.Run("Valid image file should pass", func(t *testing.T) {
flags := &Flags{
ImageFile: "/tmp/output.png",
}
options, err := flags.BuildChatOptions()
assert.NoError(t, err)
assert.Equal(t, "/tmp/output.png", options.ImageFile)
})
t.Run("Invalid extension should fail", func(t *testing.T) {
flags := &Flags{
ImageFile: "/tmp/output.gif",
}
options, err := flags.BuildChatOptions()
assert.Error(t, err)
assert.Nil(t, options)
assert.Contains(t, err.Error(), "invalid image file extension")
})
t.Run("Existing file should fail", func(t *testing.T) {
// Create a temporary file
tempFile, err := os.CreateTemp("", "existing*.png")
assert.NoError(t, err)
defer os.Remove(tempFile.Name())
tempFile.Close()
flags := &Flags{
ImageFile: tempFile.Name(),
}
options, err := flags.BuildChatOptions()
assert.Error(t, err)
assert.Nil(t, options)
assert.Contains(t, err.Error(), "image file already exists")
})
}
func TestValidateImageParameters(t *testing.T) {
t.Run("No image file and no parameters should pass", func(t *testing.T) {
err := validateImageParameters("", "", "", "", 0)
assert.NoError(t, err)
})
t.Run("Image parameters without image file should fail", func(t *testing.T) {
// Test each parameter individually
err := validateImageParameters("", "1024x1024", "", "", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image parameters")
assert.Contains(t, err.Error(), "can only be used with --image-file")
err = validateImageParameters("", "", "high", "", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image parameters")
err = validateImageParameters("", "", "", "transparent", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image parameters")
err = validateImageParameters("", "", "", "", 50)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image parameters")
// Test multiple parameters
err = validateImageParameters("", "1024x1024", "high", "transparent", 50)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image parameters")
})
t.Run("Valid size values should pass", func(t *testing.T) {
validSizes := []string{"1024x1024", "1536x1024", "1024x1536", "auto"}
for _, size := range validSizes {
err := validateImageParameters("/tmp/test.png", size, "", "", 0)
assert.NoError(t, err, "Size %s should be valid", size)
}
})
t.Run("Invalid size should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.png", "invalid", "", "", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid image size")
})
t.Run("Valid quality values should pass", func(t *testing.T) {
validQualities := []string{"low", "medium", "high", "auto"}
for _, quality := range validQualities {
err := validateImageParameters("/tmp/test.png", "", quality, "", 0)
assert.NoError(t, err, "Quality %s should be valid", quality)
}
})
t.Run("Invalid quality should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.png", "", "invalid", "", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid image quality")
})
t.Run("Valid background values should pass", func(t *testing.T) {
validBackgrounds := []string{"opaque", "transparent"}
for _, background := range validBackgrounds {
err := validateImageParameters("/tmp/test.png", "", "", background, 0)
assert.NoError(t, err, "Background %s should be valid", background)
}
})
t.Run("Invalid background should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.png", "", "", "invalid", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "invalid image background")
})
t.Run("Compression for JPEG should pass", func(t *testing.T) {
err := validateImageParameters("/tmp/test.jpg", "", "", "", 75)
assert.NoError(t, err)
})
t.Run("Compression for WebP should pass", func(t *testing.T) {
err := validateImageParameters("/tmp/test.webp", "", "", "", 50)
assert.NoError(t, err)
})
t.Run("Compression for PNG should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.png", "", "", "", 75)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image compression can only be used with JPEG and WebP formats")
})
t.Run("Invalid compression range should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.jpg", "", "", "", 150)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image compression must be between 0 and 100")
err = validateImageParameters("/tmp/test.jpg", "", "", "", -10)
assert.Error(t, err)
assert.Contains(t, err.Error(), "image compression must be between 0 and 100")
})
t.Run("Transparent background for PNG should pass", func(t *testing.T) {
err := validateImageParameters("/tmp/test.png", "", "", "transparent", 0)
assert.NoError(t, err)
})
t.Run("Transparent background for WebP should pass", func(t *testing.T) {
err := validateImageParameters("/tmp/test.webp", "", "", "transparent", 0)
assert.NoError(t, err)
})
t.Run("Transparent background for JPEG should fail", func(t *testing.T) {
err := validateImageParameters("/tmp/test.jpg", "", "", "transparent", 0)
assert.Error(t, err)
assert.Contains(t, err.Error(), "transparent background can only be used with PNG and WebP formats")
})
}
func TestBuildChatOptionsWithImageParameters(t *testing.T) {
t.Run("Valid image parameters should pass", func(t *testing.T) {
flags := &Flags{
ImageFile: "/tmp/test.png",
ImageSize: "1024x1024",
ImageQuality: "high",
ImageBackground: "transparent",
ImageCompression: 0, // Not set for PNG
}
options, err := flags.BuildChatOptions()
assert.NoError(t, err)
assert.NotNil(t, options)
assert.Equal(t, "/tmp/test.png", options.ImageFile)
assert.Equal(t, "1024x1024", options.ImageSize)
assert.Equal(t, "high", options.ImageQuality)
assert.Equal(t, "transparent", options.ImageBackground)
assert.Equal(t, 0, options.ImageCompression)
})
t.Run("Invalid image parameters should fail", func(t *testing.T) {
flags := &Flags{
ImageFile: "/tmp/test.png",
ImageSize: "invalid",
ImageQuality: "high",
ImageBackground: "transparent",
}
options, err := flags.BuildChatOptions()
assert.Error(t, err)
assert.Nil(t, options)
assert.Contains(t, err.Error(), "invalid image size")
})
t.Run("JPEG with compression should pass", func(t *testing.T) {
flags := &Flags{
ImageFile: "/tmp/test.jpg",
ImageSize: "1536x1024",
ImageQuality: "medium",
ImageBackground: "opaque",
ImageCompression: 80,
}
options, err := flags.BuildChatOptions()
assert.NoError(t, err)
assert.NotNil(t, options)
assert.Equal(t, 80, options.ImageCompression)
})
t.Run("Image parameters without image file should fail in BuildChatOptions", func(t *testing.T) {
flags := &Flags{
ImageSize: "1024x1024", // Image parameter without ImageFile
}
options, err := flags.BuildChatOptions()
assert.Error(t, err)
assert.Nil(t, options)
assert.Contains(t, err.Error(), "image parameters")
assert.Contains(t, err.Error(), "can only be used with --image-file")
})
}

View File

@@ -1,6 +1,6 @@
package common
import goopenai "github.com/sashabaranov/go-openai"
import "github.com/danielmiessler/fabric/chat"
const ChatMessageRoleMeta = "meta"
@@ -9,7 +9,7 @@ type ChatRequest struct {
SessionName string
PatternName string
PatternVariables map[string]string
Message *goopenai.ChatCompletionMessage
Message *chat.ChatCompletionMessage
Language string
Meta string
InputHasVars bool
@@ -25,10 +25,18 @@ type ChatOptions struct {
Raw bool
Seed int
ModelContextLength int
MaxTokens int
Search bool
SearchLocation string
ImageFile string
ImageSize string
ImageQuality string
ImageCompression int
ImageBackground string
}
// NormalizeMessages removes empty messages and ensures the user-assistant-user message order
func NormalizeMessages(msgs []*goopenai.ChatCompletionMessage, defaultUserMessage string) (ret []*goopenai.ChatCompletionMessage) {
func NormalizeMessages(msgs []*chat.ChatCompletionMessage, defaultUserMessage string) (ret []*chat.ChatCompletionMessage) {
// Iterate over messages to enforce the odd position rule for user messages
fullMessageIndex := 0
for _, message := range msgs {
@@ -38,8 +46,8 @@ func NormalizeMessages(msgs []*goopenai.ChatCompletionMessage, defaultUserMessag
}
// Ensure that each odd position is a user message
if fullMessageIndex%2 == 0 && message.Role != goopenai.ChatMessageRoleUser {
ret = append(ret, &goopenai.ChatCompletionMessage{Role: goopenai.ChatMessageRoleUser, Content: defaultUserMessage})
if fullMessageIndex%2 == 0 && message.Role != chat.ChatMessageRoleUser {
ret = append(ret, &chat.ChatCompletionMessage{Role: chat.ChatMessageRoleUser, Content: defaultUserMessage})
fullMessageIndex++
}
ret = append(ret, message)

View File

@@ -3,23 +3,23 @@ package common
import (
"testing"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/chat"
"github.com/stretchr/testify/assert"
)
func TestNormalizeMessages(t *testing.T) {
msgs := []*goopenai.ChatCompletionMessage{
{Role: goopenai.ChatMessageRoleUser, Content: "Hello"},
{Role: goopenai.ChatMessageRoleAssistant, Content: "Hi there!"},
{Role: goopenai.ChatMessageRoleUser, Content: ""},
{Role: goopenai.ChatMessageRoleUser, Content: ""},
{Role: goopenai.ChatMessageRoleUser, Content: "How are you?"},
msgs := []*chat.ChatCompletionMessage{
{Role: chat.ChatMessageRoleUser, Content: "Hello"},
{Role: chat.ChatMessageRoleAssistant, Content: "Hi there!"},
{Role: chat.ChatMessageRoleUser, Content: ""},
{Role: chat.ChatMessageRoleUser, Content: ""},
{Role: chat.ChatMessageRoleUser, Content: "How are you?"},
}
expected := []*goopenai.ChatCompletionMessage{
{Role: goopenai.ChatMessageRoleUser, Content: "Hello"},
{Role: goopenai.ChatMessageRoleAssistant, Content: "Hi there!"},
{Role: goopenai.ChatMessageRoleUser, Content: "How are you?"},
expected := []*chat.ChatCompletionMessage{
{Role: chat.ChatMessageRoleUser, Content: "Hello"},
{Role: chat.ChatMessageRoleAssistant, Content: "Hi there!"},
{Role: chat.ChatMessageRoleUser, Content: "How are you?"},
}
actual := NormalizeMessages(msgs, "default")

View File

@@ -42,13 +42,43 @@ func (o *GroupsItemsSelector[I]) AddGroupItems(group string, items ...I) {
o.GroupsItems = append(o.GroupsItems, &GroupItems[I]{group, items})
}
// getSortedGroupsItems returns a new slice of GroupItems with both groups and their items
// sorted alphabetically in a case-insensitive manner. The original GroupsItems are not modified.
func (o *GroupsItemsSelector[I]) getSortedGroupsItems() []*GroupItems[I] {
// Copy and sort groups (case-insensitive)
sortedGroupsItems := make([]*GroupItems[I], len(o.GroupsItems))
copy(sortedGroupsItems, o.GroupsItems)
sort.SliceStable(sortedGroupsItems, func(i, j int) bool {
return strings.ToLower(sortedGroupsItems[i].Group) < strings.ToLower(sortedGroupsItems[j].Group)
})
// For each group, sort its items
for i, groupItems := range sortedGroupsItems {
sortedItems := make([]I, len(groupItems.Items))
copy(sortedItems, groupItems.Items)
sort.SliceStable(sortedItems, func(i, j int) bool {
return strings.ToLower(o.GetItemKey(sortedItems[i])) < strings.ToLower(o.GetItemKey(sortedItems[j]))
})
// Create a new GroupItems with the sorted items
sortedGroupsItems[i] = &GroupItems[I]{
Group: groupItems.Group,
Items: sortedItems,
}
}
return sortedGroupsItems
}
func (o *GroupsItemsSelector[I]) GetGroupAndItemByItemNumber(number int) (group string, item I, err error) {
var currentItemNumber int
found := false
for _, groupItems := range o.GroupsItems {
if currentItemNumber+groupItems.Count() < number {
currentItemNumber += groupItems.Count()
sortedGroupsItems := o.getSortedGroupsItems()
for _, groupItems := range sortedGroupsItems {
if currentItemNumber+len(groupItems.Items) < number {
currentItemNumber += len(groupItems.Items)
continue
}
@@ -61,6 +91,10 @@ func (o *GroupsItemsSelector[I]) GetGroupAndItemByItemNumber(number int) (group
break
}
}
if found {
break
}
}
if !found {
@@ -69,35 +103,30 @@ func (o *GroupsItemsSelector[I]) GetGroupAndItemByItemNumber(number int) (group
return
}
func (o *GroupsItemsSelector[I]) Print() {
fmt.Printf("\n%v:\n", o.SelectionLabel)
func (o *GroupsItemsSelector[I]) Print(shellCompleteList bool) {
// Only print the section header if not in plain output mode
if !shellCompleteList {
fmt.Printf("\n%v:\n", o.SelectionLabel)
}
var currentItemIndex int
// Create a copy of groups to sort
sortedGroupsItems := make([]*GroupItems[I], len(o.GroupsItems))
copy(sortedGroupsItems, o.GroupsItems)
// Sort groups alphabetically case-insensitive
sort.SliceStable(sortedGroupsItems, func(i, j int) bool {
return strings.ToLower(sortedGroupsItems[i].Group) < strings.ToLower(sortedGroupsItems[j].Group)
})
sortedGroupsItems := o.getSortedGroupsItems()
for _, groupItems := range sortedGroupsItems {
fmt.Println()
fmt.Printf("%s\n", groupItems.Group)
fmt.Println()
if !shellCompleteList {
fmt.Println()
fmt.Printf("%s\n\n", groupItems.Group)
}
// Create a copy of items to sort
sortedItems := make([]I, len(groupItems.Items))
copy(sortedItems, groupItems.Items)
// Sort items alphabetically case-insensitive
sort.SliceStable(sortedItems, func(i, j int) bool {
return strings.ToLower(o.GetItemKey(sortedItems[i])) < strings.ToLower(o.GetItemKey(sortedItems[j]))
})
for _, item := range sortedItems {
for _, item := range groupItems.Items {
currentItemIndex++
fmt.Printf("\t[%d]\t%s\n", currentItemIndex, o.GetItemKey(item))
if shellCompleteList {
// plain mode: "index key"
fmt.Printf("%s\n", o.GetItemKey(item))
} else {
// formatted mode: "[index] key"
fmt.Printf("\t[%d]\t%s\n", currentItemIndex, o.GetItemKey(item))
}
}
}
}
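A hedged sketch of the two output modes; the struct-literal construction and the `GetItemKey` function field are assumptions not confirmed by this hunk (only `AddGroupItems` and `Print` appear above), as is the `common` package location:
```go
package main

import "github.com/danielmiessler/fabric/common"

func main() {
	// Assumed construction: SelectionLabel and GetItemKey as exported fields.
	sel := &common.GroupsItemsSelector[string]{
		SelectionLabel: "Available patterns",
		GetItemKey:     func(s string) string { return s },
	}
	sel.AddGroupItems("writing", "improve_writing", "summarize")
	sel.AddGroupItems("Analysis", "analyze_claims")

	// Formatted mode: group headers plus "[n] name" lines, with groups
	// and items sorted case-insensitively.
	sel.Print(false)

	// Plain mode: bare item names only, one per line — the output consumed
	// by the shell completion scripts via --shell-complete-list.
	sel.Print(true)
}
```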

124
common/oauth_storage.go Normal file
View File

@@ -0,0 +1,124 @@
package common
import (
"encoding/json"
"fmt"
"os"
"path/filepath"
"time"
)
// OAuthToken represents stored OAuth token information
type OAuthToken struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresAt int64 `json:"expires_at"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
}
// IsExpired checks if the token is expired or will expire within the buffer time
func (t *OAuthToken) IsExpired(bufferMinutes int) bool {
if t.ExpiresAt == 0 {
return true
}
bufferTime := time.Duration(bufferMinutes) * time.Minute
return time.Now().Add(bufferTime).Unix() >= t.ExpiresAt
}
// OAuthStorage handles persistent storage of OAuth tokens
type OAuthStorage struct {
configDir string
}
// NewOAuthStorage creates a new OAuth storage instance
func NewOAuthStorage() (*OAuthStorage, error) {
homeDir, err := os.UserHomeDir()
if err != nil {
return nil, fmt.Errorf("failed to get user home directory: %w", err)
}
configDir := filepath.Join(homeDir, ".config", "fabric")
// Ensure config directory exists
if err := os.MkdirAll(configDir, 0755); err != nil {
return nil, fmt.Errorf("failed to create config directory: %w", err)
}
return &OAuthStorage{configDir: configDir}, nil
}
// GetTokenPath returns the file path for a provider's OAuth token
func (s *OAuthStorage) GetTokenPath(provider string) string {
return filepath.Join(s.configDir, fmt.Sprintf(".%s_oauth", provider))
}
// SaveToken saves an OAuth token to disk with proper permissions
func (s *OAuthStorage) SaveToken(provider string, token *OAuthToken) error {
tokenPath := s.GetTokenPath(provider)
// Marshal token to JSON
data, err := json.MarshalIndent(token, "", " ")
if err != nil {
return fmt.Errorf("failed to marshal token: %w", err)
}
// Write to temporary file first for atomic operation
tempPath := tokenPath + ".tmp"
if err := os.WriteFile(tempPath, data, 0600); err != nil {
return fmt.Errorf("failed to write token file: %w", err)
}
// Atomic rename
if err := os.Rename(tempPath, tokenPath); err != nil {
os.Remove(tempPath) // Clean up temp file
return fmt.Errorf("failed to save token file: %w", err)
}
return nil
}
// LoadToken loads an OAuth token from disk
func (s *OAuthStorage) LoadToken(provider string) (*OAuthToken, error) {
tokenPath := s.GetTokenPath(provider)
// Check if file exists
if _, err := os.Stat(tokenPath); os.IsNotExist(err) {
return nil, nil // No token stored
}
// Read token file
data, err := os.ReadFile(tokenPath)
if err != nil {
return nil, fmt.Errorf("failed to read token file: %w", err)
}
// Unmarshal token
var token OAuthToken
if err := json.Unmarshal(data, &token); err != nil {
return nil, fmt.Errorf("failed to parse token file: %w", err)
}
return &token, nil
}
// DeleteToken removes a stored OAuth token
func (s *OAuthStorage) DeleteToken(provider string) error {
tokenPath := s.GetTokenPath(provider)
if err := os.Remove(tokenPath); err != nil && !os.IsNotExist(err) {
return fmt.Errorf("failed to delete token file: %w", err)
}
return nil
}
// HasValidToken checks if a valid (non-expired) token exists for a provider
func (s *OAuthStorage) HasValidToken(provider string, bufferMinutes int) bool {
token, err := s.LoadToken(provider)
if err != nil || token == nil {
return false
}
return !token.IsExpired(bufferMinutes)
}
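A minimal usage sketch of this storage API; the provider key `anthropic` is illustrative rather than confirmed by this file:
```go
package main

import (
	"fmt"
	"log"
	"time"

	"github.com/danielmiessler/fabric/common"
)

func main() {
	storage, err := common.NewOAuthStorage()
	if err != nil {
		log.Fatal(err)
	}

	// Tokens land in ~/.config/fabric/.<provider>_oauth with 0600 perms.
	fmt.Println(storage.GetTokenPath("anthropic"))

	// Persist a token that expires in one hour (this writes a real file).
	token := &common.OAuthToken{
		AccessToken:  "example_access",
		RefreshToken: "example_refresh",
		ExpiresAt:    time.Now().Add(time.Hour).Unix(),
		TokenType:    "Bearer",
	}
	if err := storage.SaveToken("anthropic", token); err != nil {
		log.Fatal(err)
	}

	// Valid with a 5-minute buffer: one hour out is comfortably beyond it.
	fmt.Println(storage.HasValidToken("anthropic", 5)) // true

	// A token expiring in 2 minutes fails the same 5-minute check.
	token.ExpiresAt = time.Now().Add(2 * time.Minute).Unix()
	_ = storage.SaveToken("anthropic", token)
	fmt.Println(storage.HasValidToken("anthropic", 5)) // false
}
```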

View File

@@ -0,0 +1,232 @@
package common
import (
"os"
"path/filepath"
"testing"
"time"
)
func TestOAuthToken_IsExpired(t *testing.T) {
tests := []struct {
name string
expiresAt int64
bufferMinutes int
expected bool
}{
{
name: "token not expired",
expiresAt: time.Now().Unix() + 3600, // 1 hour from now
bufferMinutes: 5,
expected: false,
},
{
name: "token expired",
expiresAt: time.Now().Unix() - 3600, // 1 hour ago
bufferMinutes: 5,
expected: true,
},
{
name: "token expires within buffer",
expiresAt: time.Now().Unix() + 120, // 2 minutes from now
bufferMinutes: 5,
expected: true,
},
{
name: "zero expiry time",
expiresAt: 0,
bufferMinutes: 5,
expected: true,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
token := &OAuthToken{ExpiresAt: tt.expiresAt}
if got := token.IsExpired(tt.bufferMinutes); got != tt.expected {
t.Errorf("IsExpired() = %v, want %v", got, tt.expected)
}
})
}
}
func TestOAuthStorage_SaveAndLoadToken(t *testing.T) {
// Create temporary directory for testing
tempDir, err := os.MkdirTemp("", "fabric_oauth_test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)
// Create storage with custom config dir
storage := &OAuthStorage{configDir: tempDir}
// Test token
token := &OAuthToken{
AccessToken: "test_access_token",
RefreshToken: "test_refresh_token",
ExpiresAt: time.Now().Unix() + 3600,
TokenType: "Bearer",
Scope: "test_scope",
}
// Test saving token
err = storage.SaveToken("test_provider", token)
if err != nil {
t.Fatalf("Failed to save token: %v", err)
}
// Verify file exists and has correct permissions
tokenPath := storage.GetTokenPath("test_provider")
info, err := os.Stat(tokenPath)
if err != nil {
t.Fatalf("Token file not created: %v", err)
}
if info.Mode().Perm() != 0600 {
t.Errorf("Token file has wrong permissions: %v, want 0600", info.Mode().Perm())
}
// Test loading token
loadedToken, err := storage.LoadToken("test_provider")
if err != nil {
t.Fatalf("Failed to load token: %v", err)
}
if loadedToken == nil {
t.Fatal("Loaded token is nil")
}
// Verify token data
if loadedToken.AccessToken != token.AccessToken {
t.Errorf("AccessToken mismatch: got %v, want %v", loadedToken.AccessToken, token.AccessToken)
}
if loadedToken.RefreshToken != token.RefreshToken {
t.Errorf("RefreshToken mismatch: got %v, want %v", loadedToken.RefreshToken, token.RefreshToken)
}
if loadedToken.ExpiresAt != token.ExpiresAt {
t.Errorf("ExpiresAt mismatch: got %v, want %v", loadedToken.ExpiresAt, token.ExpiresAt)
}
}
func TestOAuthStorage_LoadNonExistentToken(t *testing.T) {
// Create temporary directory for testing
tempDir, err := os.MkdirTemp("", "fabric_oauth_test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)
storage := &OAuthStorage{configDir: tempDir}
// Try to load non-existent token
token, err := storage.LoadToken("nonexistent")
if err != nil {
t.Fatalf("Unexpected error loading non-existent token: %v", err)
}
if token != nil {
t.Error("Expected nil token for non-existent provider")
}
}
func TestOAuthStorage_DeleteToken(t *testing.T) {
// Create temporary directory for testing
tempDir, err := os.MkdirTemp("", "fabric_oauth_test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)
storage := &OAuthStorage{configDir: tempDir}
// Create and save a token
token := &OAuthToken{
AccessToken: "test_token",
RefreshToken: "test_refresh",
ExpiresAt: time.Now().Unix() + 3600,
}
err = storage.SaveToken("test_provider", token)
if err != nil {
t.Fatalf("Failed to save token: %v", err)
}
// Verify token exists
tokenPath := storage.GetTokenPath("test_provider")
if _, err := os.Stat(tokenPath); os.IsNotExist(err) {
t.Fatal("Token file should exist before deletion")
}
// Delete token
err = storage.DeleteToken("test_provider")
if err != nil {
t.Fatalf("Failed to delete token: %v", err)
}
// Verify token is deleted
if _, err := os.Stat(tokenPath); !os.IsNotExist(err) {
t.Error("Token file should not exist after deletion")
}
// Test deleting non-existent token (should not error)
err = storage.DeleteToken("nonexistent")
if err != nil {
t.Errorf("Deleting non-existent token should not error: %v", err)
}
}
func TestOAuthStorage_HasValidToken(t *testing.T) {
// Create temporary directory for testing
tempDir, err := os.MkdirTemp("", "fabric_oauth_test")
if err != nil {
t.Fatalf("Failed to create temp dir: %v", err)
}
defer os.RemoveAll(tempDir)
storage := &OAuthStorage{configDir: tempDir}
// Test with no token
if storage.HasValidToken("test_provider", 5) {
t.Error("Should return false when no token exists")
}
// Save valid token
validToken := &OAuthToken{
AccessToken: "valid_token",
RefreshToken: "refresh_token",
ExpiresAt: time.Now().Unix() + 3600, // 1 hour from now
}
err = storage.SaveToken("test_provider", validToken)
if err != nil {
t.Fatalf("Failed to save valid token: %v", err)
}
// Test with valid token
if !storage.HasValidToken("test_provider", 5) {
t.Error("Should return true for valid token")
}
// Save expired token
expiredToken := &OAuthToken{
AccessToken: "expired_token",
RefreshToken: "refresh_token",
ExpiresAt: time.Now().Unix() - 3600, // 1 hour ago
}
err = storage.SaveToken("expired_provider", expiredToken)
if err != nil {
t.Fatalf("Failed to save expired token: %v", err)
}
// Test with expired token
if storage.HasValidToken("expired_provider", 5) {
t.Error("Should return false for expired token")
}
}
func TestOAuthStorage_GetTokenPath(t *testing.T) {
storage := &OAuthStorage{configDir: "/test/config"}
expected := filepath.Join("/test/config", ".test_provider_oauth")
actual := storage.GetTokenPath("test_provider")
if actual != expected {
t.Errorf("GetTokenPath() = %v, want %v", actual, expected)
}
}

118
completions/_fabric Normal file
View File

@@ -0,0 +1,118 @@
#compdef fabric
# Zsh completion for fabric CLI
# Place this file in a directory in your $fpath (e.g. /usr/local/share/zsh/site-functions)
_fabric_patterns() {
local -a patterns
patterns=(${(f)"$(fabric --listpatterns --shell-complete-list 2>/dev/null)"})
compadd -X "Patterns:" ${patterns}
}
_fabric_models() {
local -a models
models=(${(f)"$(fabric --listmodels --shell-complete-list 2>/dev/null)"})
compadd -X "Models:" ${models}
}
_fabric_contexts() {
local -a contexts
contexts=(${(f)"$(fabric --listcontexts --shell-complete-list 2>/dev/null)"})
compadd -X "Contexts:" ${contexts}
}
_fabric_sessions() {
local -a sessions
sessions=(${(f)"$(fabric --listsessions --shell-complete-list 2>/dev/null)"})
compadd -X "Sessions:" ${sessions}
}
_fabric_strategies() {
local -a strategies
strategies=(${(f)"$(fabric --liststrategies --shell-complete-list 2>/dev/null)"})
compadd -X "Strategies:" ${strategies}
}
_fabric_extensions() {
    local -a extensions
    extensions=(${(f)"$(fabric --listextensions --shell-complete-list 2>/dev/null)"})
    compadd -X "Extensions:" ${extensions}
}
_fabric_vendors() {
    local -a vendors
    vendors=(${(f)"$(fabric --listvendors 2>/dev/null)"})
    compadd -X "Vendors:" ${vendors}
}
_fabric() {
local curcontext="$curcontext" state line
typeset -A opt_args
_arguments -C \
'(-p --pattern)'{-p,--pattern}'[Choose a pattern from the available patterns]:pattern:_fabric_patterns' \
'(-v --variable)'{-v,--variable}'[Values for pattern variables, e.g. -v=#role:expert -v=#points:30]:variable:' \
'(-C --context)'{-C,--context}'[Choose a context from the available contexts]:context:_fabric_contexts' \
'(--session)--session[Choose a session from the available sessions]:session:_fabric_sessions' \
'(-a --attachment)'{-a,--attachment}'[Attachment path or URL (e.g. for OpenAI image recognition messages)]:file:_files' \
'(-S --setup)'{-S,--setup}'[Run setup for all reconfigurable parts of fabric]' \
'(-t --temperature)'{-t,--temperature}'[Set temperature (default: 0.7)]:temperature:' \
'(-T --topp)'{-T,--topp}'[Set top P (default: 0.9)]:topp:' \
'(-s --stream)'{-s,--stream}'[Stream]' \
'(-P --presencepenalty)'{-P,--presencepenalty}'[Set presence penalty (default: 0.0)]:presence penalty:' \
'(-r --raw)'{-r,--raw}'[Use the defaults of the model without sending chat options]' \
'(-F --frequencypenalty)'{-F,--frequencypenalty}'[Set frequency penalty (default: 0.0)]:frequency penalty:' \
'(-l --listpatterns)'{-l,--listpatterns}'[List all patterns]' \
'(-L --listmodels)'{-L,--listmodels}'[List all available models]' \
'(-x --listcontexts)'{-x,--listcontexts}'[List all contexts]' \
'(-X --listsessions)'{-X,--listsessions}'[List all sessions]' \
'(-U --updatepatterns)'{-U,--updatepatterns}'[Update patterns]' \
'(-c --copy)'{-c,--copy}'[Copy to clipboard]' \
'(-m --model)'{-m,--model}'[Choose model]:model:_fabric_models' \
'(--modelContextLength)--modelContextLength[Model context length (only affects ollama)]:length:' \
'(-o --output)'{-o,--output}'[Output to file]:file:_files' \
'(--output-session)--output-session[Output the entire session to the output file]' \
'(-n --latest)'{-n,--latest}'[Number of latest patterns to list (default: 0)]:number:' \
'(-d --changeDefaultModel)'{-d,--changeDefaultModel}'[Change default model]' \
'(-y --youtube)'{-y,--youtube}'[YouTube video or play list URL]:youtube url:' \
'(--playlist)--playlist[Prefer playlist over video if both ids are present in the URL]' \
'(--transcript)--transcript[Grab transcript from YouTube video and send to chat]' \
'(--transcript-with-timestamps)--transcript-with-timestamps[Grab transcript from YouTube video with timestamps]' \
'(--comments)--comments[Grab comments from YouTube video and send to chat]' \
'(--metadata)--metadata[Output video metadata]' \
'(-g --language)'{-g,--language}'[Specify the Language Code for the chat, e.g. -g=en -g=zh]:language:' \
'(-u --scrape_url)'{-u,--scrape_url}'[Scrape website URL to markdown using Jina AI]:url:' \
'(-q --scrape_question)'{-q,--scrape_question}'[Search question using Jina AI]:question:' \
'(-e --seed)'{-e,--seed}'[Seed to be used for LMM generation]:seed:' \
'(-w --wipecontext)'{-w,--wipecontext}'[Wipe context]:context:_fabric_contexts' \
'(-W --wipesession)'{-W,--wipesession}'[Wipe session]:session:_fabric_sessions' \
'(--printcontext)--printcontext[Print context]:context:_fabric_contexts' \
'(--printsession)--printsession[Print session]:session:_fabric_sessions' \
'(--readability)--readability[Convert HTML input into a clean, readable view]' \
'(--input-has-vars)--input-has-vars[Apply variables to user input]' \
'(--dry-run)--dry-run[Show what would be sent to the model without actually sending it]' \
'(--serve)--serve[Serve the Fabric Rest API]' \
'(--serveOllama)--serveOllama[Serve the Fabric Rest API with ollama endpoints]' \
'(--address)--address[The address to bind the REST API (default: :8080)]:address:' \
'(--api-key)--api-key[API key used to secure server routes]:api-key:' \
'(--config)--config[Path to YAML config file]:config file:_files -g "*.yaml *.yml"' \
'(--version)--version[Print current version]' \
'(--search)--search[Enable web search tool for supported models (Anthropic, OpenAI)]' \
'(--search-location)--search-location[Set location for web search results]:location:' \
'(--image-file)--image-file[Save generated image to specified file path]:image file:_files -g "*.png *.webp *.jpeg *.jpg"' \
'(--image-size)--image-size[Image dimensions]:size:(1024x1024 1536x1024 1024x1536 auto)' \
'(--image-quality)--image-quality[Image quality]:quality:(low medium high auto)' \
'(--image-compression)--image-compression[Compression level 0-100 for JPEG/WebP formats]:compression:' \
'(--image-background)--image-background[Background type]:background:(opaque transparent)' \
'(--listextensions)--listextensions[List all registered extensions]' \
'(--addextension)--addextension[Register a new extension from config file path]:config file:_files -g "*.yaml *.yml"' \
'(--rmextension)--rmextension[Remove a registered extension by name]:extension:_fabric_extensions' \
'(--strategy)--strategy[Choose a strategy from the available strategies]:strategy:_fabric_strategies' \
'(--liststrategies)--liststrategies[List all strategies]' \
'(--listvendors)--listvendors[List all vendors]' \
'(--shell-complete-list)--shell-complete-list[Output raw list without headers/formatting (for shell completion)]' \
'(-h --help)'{-h,--help}'[Show this help message]' \
'*:arguments:'
}
_fabric "$@"

103
completions/fabric.bash Normal file
View File

@@ -0,0 +1,103 @@
# Bash completion for fabric CLI
#
# Installation:
# 1. Place this file in a standard completion directory, e.g.,
# - /etc/bash_completion.d/
# - /usr/local/etc/bash_completion.d/
# - ~/.local/share/bash-completion/completions/
# 2. Or, source it directly in your ~/.bashrc or ~/.bash_profile:
# source /path/to/fabric.bash
_fabric() {
local cur prev words cword
_get_comp_words_by_ref -n : cur prev words cword
# Define all possible options/flags
local opts="--pattern -p --variable -v --context -C --session --attachment -a --setup -S --temperature -t --topp -T --stream -s --presencepenalty -P --raw -r --frequencypenalty -F --listpatterns -l --listmodels -L --listcontexts -x --listsessions -X --updatepatterns -U --copy -c --model -m --modelContextLength --output -o --output-session --latest -n --changeDefaultModel -d --youtube -y --playlist --transcript --transcript-with-timestamps --comments --metadata --language -g --scrape_url -u --scrape_question -q --seed -e --wipecontext -w --wipesession -W --printcontext --printsession --readability --input-has-vars --dry-run --serve --serveOllama --address --api-key --config --search --search-location --image-file --image-size --image-quality --image-compression --image-background --version --listextensions --addextension --rmextension --strategy --liststrategies --listvendors --shell-complete-list --help -h"
# Helper function for dynamic completions
_fabric_get_list() {
fabric "$1" --shell-complete-list 2>/dev/null
}
# Handle completions based on the previous word
case "${prev}" in
-p | --pattern)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listpatterns)" -- "${cur}"))
return 0
;;
-C | --context)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listcontexts)" -- "${cur}"))
return 0
;;
--session)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listsessions)" -- "${cur}"))
return 0
;;
-m | --model)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listmodels)" -- "${cur}"))
return 0
;;
-w | --wipecontext)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listcontexts)" -- "${cur}"))
return 0
;;
-W | --wipesession)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listsessions)" -- "${cur}"))
return 0
;;
--printcontext)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listcontexts)" -- "${cur}"))
return 0
;;
--printsession)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listsessions)" -- "${cur}"))
return 0
;;
--rmextension)
COMPREPLY=($(compgen -W "$(_fabric_get_list --listextensions)" -- "${cur}"))
return 0
;;
--strategy)
COMPREPLY=($(compgen -W "$(_fabric_get_list --liststrategies)" -- "${cur}"))
return 0
;;
# Options requiring file/directory paths
-a | --attachment | -o | --output | --config | --addextension | --image-file)
_filedir
return 0
;;
# Image generation options with specific values
--image-size)
COMPREPLY=($(compgen -W "1024x1024 1536x1024 1024x1536 auto" -- "$cur"))
return 0
;;
--image-quality)
COMPREPLY=($(compgen -W "low medium high auto" -- "$cur"))
return 0
;;
--image-background)
COMPREPLY=($(compgen -W "opaque transparent" -- "$cur"))
return 0
;;
# Options requiring simple arguments (no specific completion logic here)
-v | --variable | -t | --temperature | -T | --topp | -P | --presencepenalty | -F | --frequencypenalty | --modelContextLength | -n | --latest | -y | --youtube | -g | --language | -u | --scrape_url | -q | --scrape_question | -e | --seed | --address | --api-key | --search-location | --image-compression)
# No specific completion suggestions, user types the value
return 0
;;
esac
# If the current word starts with '-', suggest options
if [[ "${cur}" == -* ]]; then
COMPREPLY=($(compgen -W "${opts}" -- "${cur}"))
return 0
fi
# Default: complete files/directories if no other rule matches
# _filedir
# Or provide no completions if it's not an option or argument following a known flag
COMPREPLY=()
}
complete -F _fabric fabric
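All of the dynamic completions above funnel through fabric's --shell-complete-list flag, which prints one bare candidate per line with no headers. A minimal sketch of the same round-trip outside the shell, assuming fabric is installed and on PATH:

package main

import (
	"fmt"
	"os/exec"
	"strings"
)

func main() {
	// Same invocation the bash helper _fabric_get_list performs.
	out, err := exec.Command("fabric", "--listpatterns", "--shell-complete-list").Output()
	if err != nil {
		fmt.Println("could not run fabric (is it on PATH?):", err)
		return
	}
	// One completion candidate per line, ready to feed to compgen -W.
	for _, name := range strings.Split(strings.TrimSpace(string(out)), "\n") {
		fmt.Println(name)
	}
}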

completions/fabric.fish (new executable file, 101 lines)

@@ -0,0 +1,101 @@
# Fish shell completion for fabric CLI
#
# Installation:
# Copy this file to ~/.config/fish/completions/fabric.fish
# or run:
# mkdir -p ~/.config/fish/completions
# cp completions/fabric.fish ~/.config/fish/completions/
# Helper functions for dynamic completions
function __fabric_get_patterns
fabric --listpatterns --shell-complete-list 2>/dev/null
end
function __fabric_get_models
fabric --listmodels --shell-complete-list 2>/dev/null
end
function __fabric_get_contexts
fabric --listcontexts --shell-complete-list 2>/dev/null
end
function __fabric_get_sessions
fabric --listsessions --shell-complete-list 2>/dev/null
end
function __fabric_get_strategies
fabric --liststrategies --shell-complete-list 2>/dev/null
end
function __fabric_get_extensions
fabric --listextensions --shell-complete-list 2>/dev/null
end
# Main completion function
complete -c fabric -f
# Flag completions with arguments
complete -c fabric -s p -l pattern -d "Choose a pattern from the available patterns" -a "(__fabric_get_patterns)"
complete -c fabric -s v -l variable -d "Values for pattern variables, e.g. -v=#role:expert -v=#points:30"
complete -c fabric -s C -l context -d "Choose a context from the available contexts" -a "(__fabric_get_contexts)"
complete -c fabric -l session -d "Choose a session from the available sessions" -a "(__fabric_get_sessions)"
complete -c fabric -s a -l attachment -d "Attachment path or URL (e.g. for OpenAI image recognition messages)" -r
complete -c fabric -s t -l temperature -d "Set temperature (default: 0.7)"
complete -c fabric -s T -l topp -d "Set top P (default: 0.9)"
complete -c fabric -s P -l presencepenalty -d "Set presence penalty (default: 0.0)"
complete -c fabric -s F -l frequencypenalty -d "Set frequency penalty (default: 0.0)"
complete -c fabric -s m -l model -d "Choose model" -a "(__fabric_get_models)"
complete -c fabric -l modelContextLength -d "Model context length (only affects ollama)"
complete -c fabric -s o -l output -d "Output to file" -r
complete -c fabric -s n -l latest -d "Number of latest patterns to list (default: 0)"
complete -c fabric -s y -l youtube -d "YouTube video or playlist URL to grab transcript or comments"
complete -c fabric -s g -l language -d "Specify the Language Code for the chat, e.g. -g=en -g=zh"
complete -c fabric -s u -l scrape_url -d "Scrape website URL to markdown using Jina AI"
complete -c fabric -s q -l scrape_question -d "Search question using Jina AI"
complete -c fabric -s e -l seed -d "Seed to be used for LLM generation"
complete -c fabric -s w -l wipecontext -d "Wipe context" -a "(__fabric_get_contexts)"
complete -c fabric -s W -l wipesession -d "Wipe session" -a "(__fabric_get_sessions)"
complete -c fabric -l printcontext -d "Print context" -a "(__fabric_get_contexts)"
complete -c fabric -l printsession -d "Print session" -a "(__fabric_get_sessions)"
complete -c fabric -l address -d "The address to bind the REST API (default: :8080)"
complete -c fabric -l api-key -d "API key used to secure server routes"
complete -c fabric -l config -d "Path to YAML config file" -r -a "*.yaml *.yml"
complete -c fabric -l search-location -d "Set location for web search results (e.g., 'America/Los_Angeles')"
complete -c fabric -l image-file -d "Save generated image to specified file path (e.g., 'output.png')" -r -a "*.png *.webp *.jpeg *.jpg"
complete -c fabric -l image-size -d "Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)" -a "1024x1024 1536x1024 1024x1536 auto"
complete -c fabric -l image-quality -d "Image quality: low, medium, high, auto (default: auto)" -a "low medium high auto"
complete -c fabric -l image-compression -d "Compression level 0-100 for JPEG/WebP formats (default: not set)" -r
complete -c fabric -l image-background -d "Background type: opaque, transparent (default: opaque, only for PNG/WebP)" -a "opaque transparent"
complete -c fabric -l addextension -d "Register a new extension from config file path" -r -a "*.yaml *.yml"
complete -c fabric -l rmextension -d "Remove a registered extension by name" -a "(__fabric_get_extensions)"
complete -c fabric -l strategy -d "Choose a strategy from the available strategies" -a "(__fabric_get_strategies)"
# Boolean flags (no arguments)
complete -c fabric -s S -l setup -d "Run setup for all reconfigurable parts of fabric"
complete -c fabric -s s -l stream -d "Stream"
complete -c fabric -s r -l raw -d "Use the defaults of the model without sending chat options"
complete -c fabric -s l -l listpatterns -d "List all patterns"
complete -c fabric -s L -l listmodels -d "List all available models"
complete -c fabric -s x -l listcontexts -d "List all contexts"
complete -c fabric -s X -l listsessions -d "List all sessions"
complete -c fabric -s U -l updatepatterns -d "Update patterns"
complete -c fabric -s c -l copy -d "Copy to clipboard"
complete -c fabric -l output-session -d "Output the entire session to the output file"
complete -c fabric -s d -l changeDefaultModel -d "Change default model"
complete -c fabric -l playlist -d "Prefer playlist over video if both ids are present in the URL"
complete -c fabric -l transcript -d "Grab transcript from YouTube video and send to chat"
complete -c fabric -l transcript-with-timestamps -d "Grab transcript from YouTube video with timestamps"
complete -c fabric -l comments -d "Grab comments from YouTube video and send to chat"
complete -c fabric -l metadata -d "Output video metadata"
complete -c fabric -l readability -d "Convert HTML input into a clean, readable view"
complete -c fabric -l input-has-vars -d "Apply variables to user input"
complete -c fabric -l dry-run -d "Show what would be sent to the model without actually sending it"
complete -c fabric -l search -d "Enable web search tool for supported models (Anthropic, OpenAI)"
complete -c fabric -l serve -d "Serve the Fabric Rest API"
complete -c fabric -l serveOllama -d "Serve the Fabric Rest API with ollama endpoints"
complete -c fabric -l version -d "Print current version"
complete -c fabric -l listextensions -d "List all registered extensions"
complete -c fabric -l liststrategies -d "List all strategies"
complete -c fabric -l listvendors -d "List all vendors"
complete -c fabric -l shell-complete-list -d "Output raw list without headers/formatting (for shell completion)"
complete -c fabric -s h -l help -d "Show this help message"


@@ -7,7 +7,7 @@ import (
"os"
"strings"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins/ai"
@@ -30,8 +30,15 @@ type Chatter struct {
strategy string
}
// Send processes a chat request and applies any file changes if using the create_coding_feature pattern
// Send processes a chat request and applies file changes for create_coding_feature pattern
func (o *Chatter) Send(request *common.ChatRequest, opts *common.ChatOptions) (session *fsdb.Session, err error) {
modelToUse := opts.Model
if modelToUse == "" {
modelToUse = o.model
}
if o.vendor.NeedsRawMode(modelToUse) {
opts.Raw = true
}
if session, err = o.BuildSession(request, opts.Raw); err != nil {
return
}
@@ -82,18 +89,15 @@ func (o *Chatter) Send(request *common.ChatRequest, opts *common.ChatOptions) (s
return
}
// Process file changes if using the create_coding_feature pattern
// Process file changes for create_coding_feature pattern
if request.PatternName == "create_coding_feature" {
// Look for file changes in the response
summary, fileChanges, parseErr := common.ParseFileChanges(message)
if parseErr != nil {
fmt.Printf("Warning: Failed to parse file changes: %v\n", parseErr)
} else if len(fileChanges) > 0 {
// Get the project root - use the current directory
projectRoot, err := os.Getwd()
if err != nil {
fmt.Printf("Warning: Failed to get current directory: %v\n", err)
// Continue without applying changes
} else {
if applyErr := common.ApplyFileChanges(projectRoot, fileChanges); applyErr != nil {
fmt.Printf("Warning: Failed to apply file changes: %v\n", applyErr)
@@ -106,7 +110,7 @@ func (o *Chatter) Send(request *common.ChatRequest, opts *common.ChatOptions) (s
message = summary
}
session.Append(&goopenai.ChatCompletionMessage{Role: goopenai.ChatMessageRoleAssistant, Content: message})
session.Append(&chat.ChatCompletionMessage{Role: chat.ChatMessageRoleAssistant, Content: message})
if session.Name != "" {
err = o.db.Sessions.SaveSession(session)
@@ -115,7 +119,6 @@ func (o *Chatter) Send(request *common.ChatRequest, opts *common.ChatOptions) (s
}
func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *fsdb.Session, err error) {
// If a session name is provided, retrieve it from the database
if request.SessionName != "" {
var sess *fsdb.Session
if sess, err = o.db.Sessions.Get(request.SessionName); err != nil {
@@ -128,7 +131,7 @@ func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *
}
if request.Meta != "" {
session.Append(&goopenai.ChatCompletionMessage{Role: common.ChatMessageRoleMeta, Content: request.Meta})
session.Append(&chat.ChatCompletionMessage{Role: common.ChatMessageRoleMeta, Content: request.Meta})
}
// if a context name is provided, retrieve it from the database
@@ -142,12 +145,12 @@ func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *
contextContent = ctx.Content
}
// Process any template variables in the message content (user input)
// Process template variables in message content
// Double curly braces {{variable}} indicate template substitution
// Ensure we have a message before processing, otherwise we'll get an error when we pass to pattern.go
// Ensure we have a message before processing
if request.Message == nil {
request.Message = &goopenai.ChatCompletionMessage{
Role: goopenai.ChatMessageRoleUser,
request.Message = &chat.ChatCompletionMessage{
Role: chat.ChatMessageRoleUser,
Content: " ",
}
}
@@ -161,19 +164,19 @@ func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *
}
var patternContent string
inputUsed := false
if request.PatternName != "" {
pattern, err := o.db.Patterns.GetApplyVariables(request.PatternName, request.PatternVariables, request.Message.Content)
// pattern will now contain user input, and all variables will be resolved, or errored
if err != nil {
return nil, fmt.Errorf("could not get pattern %s: %v", request.PatternName, err)
}
patternContent = pattern.Pattern
inputUsed = true
}
systemMessage := strings.TrimSpace(contextContent) + strings.TrimSpace(patternContent)
// Apply strategy if specified
if request.StrategyName != "" {
strategy, err := strategy.LoadStrategy(request.StrategyName)
if err != nil {
@@ -192,24 +195,53 @@ func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *
}
if raw {
var finalContent string
if systemMessage != "" {
if request.PatternName != "" {
finalContent = systemMessage
} else {
finalContent = fmt.Sprintf("%s\n\n%s", systemMessage, request.Message.Content)
}
// Handle MultiContent properly in raw mode
if len(request.Message.MultiContent) > 0 {
// When we have attachments, add the text as a text part in MultiContent
newMultiContent := []chat.ChatMessagePart{
{
Type: chat.ChatMessagePartTypeText,
Text: finalContent,
},
}
// Add existing non-text parts (like images)
for _, part := range request.Message.MultiContent {
if part.Type != chat.ChatMessagePartTypeText {
newMultiContent = append(newMultiContent, part)
}
}
request.Message = &chat.ChatCompletionMessage{
Role: chat.ChatMessageRoleUser,
MultiContent: newMultiContent,
}
} else {
// No attachments, use regular Content field
request.Message = &chat.ChatCompletionMessage{
Role: chat.ChatMessageRoleUser,
Content: finalContent,
}
}
}
if request.Message != nil {
if systemMessage != "" {
request.Message.Content = systemMessage
// system contains pattern which contains user input
}
} else {
if systemMessage != "" {
request.Message = &goopenai.ChatCompletionMessage{Role: goopenai.ChatMessageRoleSystem, Content: systemMessage}
}
session.Append(request.Message)
}
} else {
if systemMessage != "" {
session.Append(&goopenai.ChatCompletionMessage{Role: goopenai.ChatMessageRoleSystem, Content: systemMessage})
session.Append(&chat.ChatCompletionMessage{Role: chat.ChatMessageRoleSystem, Content: systemMessage})
}
// If multi-part content, it is in the user message, and should be added.
// Otherwise, we should only add it if we have not already used it in the systemMessage.
if len(request.Message.MultiContent) > 0 || (request.Message != nil && !inputUsed) {
session.Append(request.Message)
}
}
if request.Message != nil {
session.Append(request.Message)
}
if session.IsEmpty() {
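The raw-mode branch above folds the combined system-and-user text into the first text part and carries over any non-text parts such as images. A minimal self-contained sketch of that merge, using simplified stand-in types rather than the real chat package:

package main

import "fmt"

// part is a simplified stand-in for chat.ChatMessagePart.
type part struct {
	Type string // "text" or e.g. "image_url"
	Text string
}

// mergeRaw mirrors the MultiContent handling: new text first, attachments kept.
func mergeRaw(finalContent string, existing []part) []part {
	merged := []part{{Type: "text", Text: finalContent}}
	for _, p := range existing {
		if p.Type != "text" {
			merged = append(merged, p) // keep images, drop the stale text part
		}
	}
	return merged
}

func main() {
	parts := mergeRaw("system prompt\n\nuser input", []part{
		{Type: "text", Text: "old user text"},
		{Type: "image_url"},
	})
	fmt.Println(len(parts)) // 2: the merged text plus the image
}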


@@ -10,7 +10,9 @@ import (
"strconv"
"strings"
"github.com/danielmiessler/fabric/plugins/ai/bedrock"
"github.com/danielmiessler/fabric/plugins/ai/exolab"
"github.com/danielmiessler/fabric/plugins/ai/perplexity" // Added Perplexity plugin
"github.com/danielmiessler/fabric/plugins/strategy"
"github.com/samber/lo"
@@ -29,17 +31,46 @@ import (
"github.com/danielmiessler/fabric/plugins/db/fsdb"
"github.com/danielmiessler/fabric/plugins/template"
"github.com/danielmiessler/fabric/plugins/tools"
"github.com/danielmiessler/fabric/plugins/tools/custom_patterns"
"github.com/danielmiessler/fabric/plugins/tools/jina"
"github.com/danielmiessler/fabric/plugins/tools/lang"
"github.com/danielmiessler/fabric/plugins/tools/youtube"
)
// hasAWSCredentials checks if any AWS credentials are present either in the
// environment variables or in the default/shared credentials file. It doesn't
// attempt to verify the validity of the credentials, but simply ensures that a
// potential authentication source exists so we can safely initialize the
// Bedrock client without causing the AWS SDK to search for credentials.
func hasAWSCredentials() bool {
if os.Getenv("AWS_PROFILE") != "" ||
os.Getenv("AWS_ROLE_SESSION_NAME") != "" ||
(os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "") {
return true
}
credFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
if credFile == "" {
if home, err := os.UserHomeDir(); err == nil {
credFile = filepath.Join(home, ".aws", "credentials")
}
}
if credFile != "" {
if _, err := os.Stat(credFile); err == nil {
return true
}
}
return false
}
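The environment-variable half of this guard is easy to exercise on its own; a minimal sketch with a hypothetical helper name (the shared-credentials-file branch is omitted):

package main

import (
	"fmt"
	"os"
)

// envHasAWSCredentials reproduces only the env-var branch of hasAWSCredentials.
func envHasAWSCredentials() bool {
	return os.Getenv("AWS_PROFILE") != "" ||
		os.Getenv("AWS_ROLE_SESSION_NAME") != "" ||
		(os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "")
}

func main() {
	os.Setenv("AWS_PROFILE", "dev") // any non-empty profile counts as a source
	fmt.Println(envHasAWSCredentials()) // true
}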
func NewPluginRegistry(db *fsdb.Db) (ret *PluginRegistry, err error) {
ret = &PluginRegistry{
Db: db,
VendorManager: ai.NewVendorsManager(),
VendorsAll: ai.NewVendorsManager(),
PatternsLoader: tools.NewPatternsLoader(db.Patterns),
CustomPatterns: custom_patterns.NewCustomPatterns(),
YouTube: youtube.NewYouTube(),
Language: lang.NewLanguage(),
Jina: jina.NewClient(),
@@ -66,8 +97,13 @@ func NewPluginRegistry(db *fsdb.Db) (ret *PluginRegistry, err error) {
anthropic.NewClient(),
lmstudio.NewClient(),
exolab.NewClient(),
perplexity.NewClient(), // Added Perplexity client
)
if hasAWSCredentials() {
vendors = append(vendors, bedrock.NewClient())
}
// Add all OpenAI-compatible providers
for providerName := range openai_compatible.ProviderMap {
provider, _ := openai_compatible.GetProviderByName(providerName)
@@ -104,6 +140,7 @@ type PluginRegistry struct {
VendorsAll *ai.VendorsManager
Defaults *tools.Defaults
PatternsLoader *tools.PatternsLoader
CustomPatterns *custom_patterns.CustomPatterns
YouTube *youtube.YouTube
Language *lang.Language
Jina *jina.Client
@@ -117,6 +154,7 @@ func (o *PluginRegistry) SaveEnvFile() (err error) {
o.Defaults.Settings.FillEnvFileContent(&envFileContent)
o.PatternsLoader.SetupFillEnvFileContent(&envFileContent)
o.CustomPatterns.SetupFillEnvFileContent(&envFileContent)
o.Strategies.SetupFillEnvFileContent(&envFileContent)
for _, vendor := range o.VendorManager.Vendors {
@@ -149,10 +187,10 @@ func (o *PluginRegistry) Setup() (err error) {
return vendor
})...)
groupsPlugins.AddGroupItems("Tools", o.Defaults, o.Jina, o.Language, o.PatternsLoader, o.Strategies, o.YouTube)
groupsPlugins.AddGroupItems("Tools", o.CustomPatterns, o.Defaults, o.Jina, o.Language, o.PatternsLoader, o.Strategies, o.YouTube)
for {
groupsPlugins.Print()
groupsPlugins.Print(false)
if answerErr := setupQuestion.Ask("Plugin Number"); answerErr != nil {
break
@@ -205,7 +243,7 @@ func (o *PluginRegistry) SetupVendor(vendorName string) (err error) {
func (o *PluginRegistry) ConfigureVendors() {
o.VendorManager.Clear()
for _, vendor := range o.VendorsAll.Vendors {
if vendorErr := vendor.Configure(); vendorErr == nil {
if vendorErr := vendor.Configure(); vendorErr == nil && vendor.IsConfigured() {
o.VendorManager.AddVendors(vendor)
}
}

flake.lock (generated; 18 changed lines)

@@ -26,11 +26,11 @@
]
},
"locked": {
"lastModified": 1733668782,
"narHash": "sha256-tPsqU00FhgdFr0JiQUiBMgPVbl1jbPCY5gbFiJycL3I=",
"lastModified": 1742209644,
"narHash": "sha256-jMy1XqXqD0/tJprEbUmKilTkvbDY/C0ZGSsJJH4TNCE=",
"owner": "nix-community",
"repo": "gomod2nix",
"rev": "514283ec89c39ad0079ff2f3b1437404e4cba608",
"rev": "8f3534eb8f6c5c3fce799376dc3b91bae6b11884",
"type": "github"
},
"original": {
@@ -41,11 +41,11 @@
},
"nixpkgs": {
"locked": {
"lastModified": 1736344531,
"narHash": "sha256-8YVQ9ZbSfuUk2bUf2KRj60NRraLPKPS0Q4QFTbc+c2c=",
"lastModified": 1745234285,
"narHash": "sha256-GfpyMzxwkfgRVN0cTGQSkTC0OHhEkv3Jf6Tcjm//qZ0=",
"owner": "nixos",
"repo": "nixpkgs",
"rev": "bffc22eb12172e6db3c5dde9e3e5628f8e3e7912",
"rev": "c11863f1e964833214b767f4a369c6e6a7aba141",
"type": "github"
},
"original": {
@@ -100,11 +100,11 @@
]
},
"locked": {
"lastModified": 1736154270,
"narHash": "sha256-p2r8xhQZ3TYIEKBoiEhllKWQqWNJNoT9v64Vmg4q8Zw=",
"lastModified": 1744961264,
"narHash": "sha256-aRmUh0AMwcbdjJHnytg1e5h5ECcaWtIFQa6d9gI85AI=",
"owner": "numtide",
"repo": "treefmt-nix",
"rev": "13c913f5deb3a5c08bb810efd89dc8cb24dd968b",
"rev": "8d404a69efe76146368885110f29a2ca3700bee6",
"type": "github"
},
"original": {


@@ -28,6 +28,8 @@
let
forAllSystems = nixpkgs.lib.genAttrs (import systems);
getGoVersion = system: nixpkgs.legacyPackages.${system}.go_1_24;
treefmtEval = forAllSystems (
system:
let
@@ -47,10 +49,14 @@
system:
let
pkgs = nixpkgs.legacyPackages.${system};
goEnv = gomod2nix.legacyPackages.${system}.mkGoEnv { pwd = ./.; };
goVersion = getGoVersion system;
goEnv = gomod2nix.legacyPackages.${system}.mkGoEnv {
pwd = ./.;
go = goVersion;
};
in
import ./nix/shell.nix {
inherit pkgs goEnv;
inherit pkgs goEnv goVersion;
inherit (gomod2nix.legacyPackages.${system}) gomod2nix;
}
);
@@ -59,10 +65,12 @@
system:
let
pkgs = nixpkgs.legacyPackages.${system};
goVersion = getGoVersion system;
in
{
default = self.packages.${system}.fabric;
fabric = pkgs.callPackage ./nix/pkgs/fabric {
go = goVersion;
inherit (gomod2nix.legacyPackages.${system}) buildGoApplication;
};
inherit (gomod2nix.legacyPackages.${system}) gomod2nix;

go.mod (113 changed lines)

@@ -1,66 +1,84 @@
module github.com/danielmiessler/fabric
go 1.23.4
go 1.24.0
toolchain go1.24.2
require (
github.com/anaskhan96/soup v1.2.5
github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.11
github.com/anthropics/anthropic-sdk-go v1.4.0
github.com/atotto/clipboard v0.1.4
github.com/gabriel-vasile/mimetype v1.4.8
github.com/gin-gonic/gin v1.10.0
github.com/go-git/go-git/v5 v5.13.2
github.com/aws/aws-sdk-go-v2 v1.36.4
github.com/aws/aws-sdk-go-v2/config v1.27.27
github.com/aws/aws-sdk-go-v2/service/bedrock v1.34.1
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0
github.com/gabriel-vasile/mimetype v1.4.9
github.com/gin-gonic/gin v1.10.1
github.com/go-git/go-git/v5 v5.16.2
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612
github.com/google/generative-ai-go v0.19.0
github.com/google/generative-ai-go v0.20.1
github.com/jessevdk/go-flags v1.6.1
github.com/joho/godotenv v1.5.1
github.com/ollama/ollama v0.5.12
github.com/ollama/ollama v0.9.0
github.com/openai/openai-go v1.8.2
github.com/otiai10/copy v1.14.1
github.com/pkg/errors v0.9.1
github.com/samber/lo v1.49.1
github.com/sashabaranov/go-openai v1.38.0
github.com/samber/lo v1.50.0
github.com/sgaunet/perplexity-go/v2 v2.8.0
github.com/stretchr/testify v1.10.0
golang.org/x/text v0.23.0
google.golang.org/api v0.223.0
gopkg.in/yaml.v2 v2.4.0
golang.org/x/oauth2 v0.30.0
golang.org/x/text v0.26.0
google.golang.org/api v0.236.0
gopkg.in/yaml.v3 v3.0.1
)
require (
cloud.google.com/go v0.118.3 // indirect
cloud.google.com/go/ai v0.10.0 // indirect
cloud.google.com/go/auth v0.15.0 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/longrunning v0.6.4 // indirect
dario.cat/mergo v1.0.1 // indirect
cloud.google.com/go v0.121.2 // indirect
cloud.google.com/go/ai v0.12.1 // indirect
cloud.google.com/go/auth v0.16.2 // indirect
cloud.google.com/go/auth/oauth2adapt v0.2.8 // indirect
cloud.google.com/go/compute/metadata v0.7.0 // indirect
cloud.google.com/go/longrunning v0.6.7 // indirect
dario.cat/mergo v1.0.2 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
github.com/ProtonMail/go-crypto v1.1.5 // indirect
github.com/ProtonMail/go-crypto v1.3.0 // indirect
github.com/andybalholm/cascadia v1.3.3 // indirect
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de // indirect
github.com/bytedance/sonic v1.12.9 // indirect
github.com/bytedance/sonic/loader v0.2.3 // indirect
github.com/cloudflare/circl v1.6.0 // indirect
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 // indirect
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 // indirect
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 // indirect
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35 // indirect
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 // indirect
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 // indirect
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 // indirect
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 // indirect
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 // indirect
github.com/aws/smithy-go v1.22.2 // indirect
github.com/bytedance/sonic v1.13.3 // indirect
github.com/bytedance/sonic/loader v0.2.4 // indirect
github.com/cloudflare/circl v1.6.1 // indirect
github.com/cloudwego/base64x v0.1.5 // indirect
github.com/cyphar/filepath-securejoin v0.4.1 // indirect
github.com/davecgh/go-spew v1.1.1 // indirect
github.com/emirpasic/gods v1.18.1 // indirect
github.com/felixge/httpsnoop v1.0.4 // indirect
github.com/gin-contrib/sse v1.0.0 // indirect
github.com/gin-contrib/sse v1.1.0 // indirect
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 // indirect
github.com/go-git/go-billy/v5 v5.6.2 // indirect
github.com/go-logr/logr v1.4.2 // indirect
github.com/go-logr/logr v1.4.3 // indirect
github.com/go-logr/stdr v1.2.2 // indirect
github.com/go-playground/locales v0.14.1 // indirect
github.com/go-playground/universal-translator v0.18.1 // indirect
github.com/go-playground/validator/v10 v10.25.0 // indirect
github.com/go-playground/validator/v10 v10.26.0 // indirect
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c // indirect
github.com/goccy/go-json v0.10.5 // indirect
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f // indirect
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 // indirect
github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.6 // indirect
github.com/googleapis/gax-go/v2 v2.14.2 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
@@ -70,34 +88,33 @@ require (
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect
github.com/modern-go/reflect2 v1.0.2 // indirect
github.com/otiai10/mint v1.6.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.3 // indirect
github.com/pelletier/go-toml/v2 v2.2.4 // indirect
github.com/pjbgf/sha1cd v0.3.2 // indirect
github.com/pmezard/go-difflib v1.0.0 // indirect
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 // indirect
github.com/sergi/go-diff v1.4.0 // indirect
github.com/skeema/knownhosts v1.3.1 // indirect
github.com/tidwall/gjson v1.18.0 // indirect
github.com/tidwall/match v1.1.1 // indirect
github.com/tidwall/pretty v1.2.1 // indirect
github.com/tidwall/sjson v1.2.5 // indirect
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/ugorji/go/codec v1.2.14 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
go.opentelemetry.io/auto/sdk v1.1.0 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 // indirect
go.opentelemetry.io/otel v1.34.0 // indirect
go.opentelemetry.io/otel/metric v1.34.0 // indirect
go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/arch v0.14.0 // indirect
golang.org/x/crypto v0.36.0 // indirect
golang.org/x/net v0.38.0 // indirect
golang.org/x/oauth2 v0.27.0 // indirect
golang.org/x/sync v0.12.0 // indirect
golang.org/x/sys v0.31.0 // indirect
golang.org/x/time v0.10.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99 // indirect
google.golang.org/grpc v1.70.0 // indirect
google.golang.org/protobuf v1.36.5 // indirect
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 // indirect
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 // indirect
go.opentelemetry.io/otel v1.36.0 // indirect
go.opentelemetry.io/otel/metric v1.36.0 // indirect
go.opentelemetry.io/otel/trace v1.36.0 // indirect
golang.org/x/arch v0.18.0 // indirect
golang.org/x/crypto v0.39.0 // indirect
golang.org/x/net v0.41.0 // indirect
golang.org/x/sync v0.15.0 // indirect
golang.org/x/sys v0.33.0 // indirect
golang.org/x/time v0.12.0 // indirect
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 // indirect
google.golang.org/grpc v1.73.0 // indirect
google.golang.org/protobuf v1.36.6 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
)

go.sum (247 changed lines)

@@ -1,43 +1,73 @@
cloud.google.com/go v0.118.3 h1:jsypSnrE/w4mJysioGdMBg4MiW/hHx/sArFpaBWHdME=
cloud.google.com/go v0.118.3/go.mod h1:Lhs3YLnBlwJ4KA6nuObNMZ/fCbOQBPuWKPoE0Wa/9Vc=
cloud.google.com/go/ai v0.10.0 h1:hwj6CI6sMKubXodoJJGTy/c2T1RbbLGM6TL3QoAvzU8=
cloud.google.com/go/ai v0.10.0/go.mod h1:kvnt2KeHqX8+41PVeMRBETDyQAp/RFvBWGdx/aGjNMo=
cloud.google.com/go/auth v0.15.0 h1:Ly0u4aA5vG/fsSsxu98qCQBemXtAtJf+95z9HK+cxps=
cloud.google.com/go/auth v0.15.0/go.mod h1:WJDGqZ1o9E9wKIL+IwStfyn/+s59zl4Bi+1KQNVXLZ8=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/longrunning v0.6.4 h1:3tyw9rO3E2XVXzSApn1gyEEnH2K9SynNQjMlBi3uHLg=
cloud.google.com/go/longrunning v0.6.4/go.mod h1:ttZpLCe6e7EXvn9OxpBRx7kZEB0efv8yBO6YnVMfhJs=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
cloud.google.com/go v0.121.2 h1:v2qQpN6Dx9x2NmwrqlesOt3Ys4ol5/lFZ6Mg1B7OJCg=
cloud.google.com/go v0.121.2/go.mod h1:nRFlrHq39MNVWu+zESP2PosMWA0ryJw8KUBZ2iZpxbw=
cloud.google.com/go/ai v0.12.1 h1:m1n/VjUuHS+pEO/2R4/VbuuEIkgk0w67fDQvFaMngM0=
cloud.google.com/go/ai v0.12.1/go.mod h1:5vIPNe1ZQsVZqCliXIPL4QnhObQQY4d9hAGHdVc4iw4=
cloud.google.com/go/auth v0.16.2 h1:QvBAGFPLrDeoiNjyfVunhQ10HKNYuOwZ5noee0M5df4=
cloud.google.com/go/auth v0.16.2/go.mod h1:sRBas2Y1fB1vZTdurouM0AzuYQBMZinrUYL8EufhtEA=
cloud.google.com/go/auth/oauth2adapt v0.2.8 h1:keo8NaayQZ6wimpNSmW5OPc283g65QNIiLpZnkHRbnc=
cloud.google.com/go/auth/oauth2adapt v0.2.8/go.mod h1:XQ9y31RkqZCcwJWNSx2Xvric3RrU88hAYYbjDWYDL+c=
cloud.google.com/go/compute/metadata v0.7.0 h1:PBWF+iiAerVNe8UCHxdOt6eHLVc3ydFeOCw78U8ytSU=
cloud.google.com/go/compute/metadata v0.7.0/go.mod h1:j5MvL9PprKL39t166CoB1uVHfQMs4tFQZZcKwksXUjo=
cloud.google.com/go/longrunning v0.6.7 h1:IGtfDWHhQCgCjwQjV9iiLnUta9LBCo8R9QmAFsS/PrE=
cloud.google.com/go/longrunning v0.6.7/go.mod h1:EAFV3IZAKmM56TyiE6VAP3VoTzhZzySwI/YI1s/nRsY=
dario.cat/mergo v1.0.2 h1:85+piFYR1tMbRrLcDwR18y4UKJ3aH1Tbzi24VRW1TK8=
dario.cat/mergo v1.0.2/go.mod h1:E/hbnu0NxMFBjpMIE34DRGLWqDy0g5FuKDhCb31ngxA=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.1.5 h1:eoAQfK2dwL+tFSFpr7TbOaPNUbPiJj4fLYwwGE1FQO4=
github.com/ProtonMail/go-crypto v1.1.5/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/anaskhan96/soup v1.2.5 h1:V/FHiusdTrPrdF4iA1YkVxsOpdNcgvqT1hG+YtcZ5hM=
github.com/anaskhan96/soup v1.2.5/go.mod h1:6YnEp9A2yywlYdM4EgDz9NEHclocMepEtku7wg6Cq3s=
github.com/ProtonMail/go-crypto v1.3.0 h1:ILq8+Sf5If5DCpHQp4PbZdS1J7HDFRXz/+xKBiRGFrw=
github.com/ProtonMail/go-crypto v1.3.0/go.mod h1:9whxjD8Rbs29b4XWbB8irEcE8KHMqaR2e7GWU1R+/PE=
github.com/andybalholm/cascadia v1.3.3 h1:AG2YHrzJIm4BZ19iwJ/DAua6Btl3IwJX+VI4kktS1LM=
github.com/andybalholm/cascadia v1.3.3/go.mod h1:xNd9bqTn98Ln4DwST8/nG+H0yuB8Hmgu1YHNnWw0GeA=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFIImctFaOjnTIavg87rW78vTPkQqLI8=
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.11 h1:O3/AMObKntZyu1KH6Xks6E0gbE8w6HVaKHE+/vXARzM=
github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.11/go.mod h1:GJxtdOs9K4neo8Gg65CjJ7jNautmldGli5/OFNabOoo=
github.com/anthropics/anthropic-sdk-go v1.4.0 h1:fU1jKxYbQdQDiEXCxeW5XZRIOwKevn/PMg8Ay1nnUx0=
github.com/anthropics/anthropic-sdk-go v1.4.0/go.mod h1:AapDW22irxK2PSumZiQXYUFvsdQgkwIWlpESweWZI/c=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/bytedance/sonic v1.12.9 h1:Od1BvK55NnewtGaJsTDeAOSnLVO2BTSLOe0+ooKokmQ=
github.com/bytedance/sonic v1.12.9/go.mod h1:uVvFidNmlt9+wa31S1urfwwthTWteBgG0hWuoKAXTx8=
github.com/aws/aws-sdk-go-v2 v1.36.4 h1:GySzjhVvx0ERP6eyfAbAuAXLtAda5TEy19E5q5W8I9E=
github.com/aws/aws-sdk-go-v2 v1.36.4/go.mod h1:LLXuLpgzEbD766Z5ECcRmi8AzSwfZItDtmABVkRLGzg=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10 h1:zAybnyUQXIZ5mok5Jqwlf58/TFE7uvd3IAsa1aF9cXs=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.10/go.mod h1:qqvMj6gHLR/EXWZw4ZbqlPbQUyenf4h82UQUlKc+l14=
github.com/aws/aws-sdk-go-v2/config v1.27.27 h1:HdqgGt1OAP0HkEDDShEl0oSYa9ZZBSOmKpdpsDMdO90=
github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.27 h1:2raNba6gr2IfA0eqqiP2XiQ0UVOpGPgDSi0I9iAP+UI=
github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11 h1:KreluoV8FZDEtI6Co2xuNk/UqI9iwMrOx/87PBNIKqw=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35 h1:o1v1VFfPcDVlK3ll1L5xHsaQAFdNtZ5GXnNR7SwueC4=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.35/go.mod h1:rZUQNYMNG+8uZxz9FOerQJ+FceCiodXvixpeRtdESrU=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35 h1:R5b82ubO2NntENm3SAm0ADME+H630HomNJdgv+yZ3xw=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.35/go.mod h1:FuA+nmgMRfkzVKYDNEqQadvEMxtxl9+RLT9ribCwEMs=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0 h1:hT8rVHwugYE2lEfdFE0QWVo81lF7jMrYJVDWI+f+VxU=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/service/bedrock v1.34.1 h1:sD4KqDKG8aOaMWaWTMB8l8VnLa/Di7XHb0Uf4plrndA=
github.com/aws/aws-sdk-go-v2/service/bedrock v1.34.1/go.mod h1:lrn8DOVFYFeaUZKxJ95T5eGDBjnhffgGz68Wq2sfBbA=
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0 h1:eMOwQ8ZZK+76+08RfxeaGUtRFN6wxmD1rvqovc2kq2w=
github.com/aws/aws-sdk-go-v2/service/bedrockruntime v1.30.0/go.mod h1:0b5Rq7rUvSQFYHI1UO0zFTV/S6j6DUyuykXA80C+YOI=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3 h1:dT3MqvGhSoaIhRseqw2I0yH81l7wiR2vjs57O51EAm8=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17 h1:HGErhhrxZlQ044RiM+WdoZxp0p+EGM62y3L6pwA4olE=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4 h1:BXx0ZIxvrJdSgSvKTZ+yRBeSqqgPM89VPlulEcl37tM=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4 h1:yiwVzJW2ZxZTurVbYWA7QOrAaCYQR72t0wrSBfoesUE=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3 h1:ZsDKRLXGWHk8WdtyYMoGNO7bTudrvuKpDKgMVRlepGE=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
github.com/aws/smithy-go v1.22.2 h1:6D9hW43xKFrRx/tXXfAlIZc4JI+yQe6snnWcQyxSyLQ=
github.com/aws/smithy-go v1.22.2/go.mod h1:irrKGvNn1InZwb2d7fkIRNucdfwR8R+Ts3wxYa/cJHg=
github.com/bytedance/sonic v1.13.3 h1:MS8gmaH16Gtirygw7jV91pDCN33NyMrPbN7qiYhEsF0=
github.com/bytedance/sonic v1.13.3/go.mod h1:o68xyaF9u2gvVBuGHPlUVCy+ZfmNNO5ETf1+KgkJhz4=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.3 h1:yctD0Q3v2NOGfSWPLPvG2ggA2kV6TS6s4wioyEqssH0=
github.com/bytedance/sonic/loader v0.2.3/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cloudflare/circl v1.6.0 h1:cr5JKic4HI+LkINy2lg3W2jF8sHCVTBncJr5gIIq7qk=
github.com/cloudflare/circl v1.6.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/bytedance/sonic/loader v0.2.4 h1:ZWCw4stuXUsn1/+zQDqeE7JKP+QO47tz7QCNan80NzY=
github.com/bytedance/sonic/loader v0.2.4/go.mod h1:N8A3vUdtUebEY2/VQC0MyhYeKUFosQU6FxH2JmUe6VI=
github.com/cloudflare/circl v1.6.1 h1:zqIqSPIndyBh1bjLVVDHMPpVKqp8Su/V+6MeDzzQBQ0=
github.com/cloudflare/circl v1.6.1/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudwego/base64x v0.1.5 h1:XPciSp1xaq2VCSt6lF0phncD4koWyULpl5bUxbfCyP4=
github.com/cloudwego/base64x v0.1.5/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
@@ -46,18 +76,18 @@ github.com/cyphar/filepath-securejoin v0.4.1/go.mod h1:Sdj7gXlvMcPZsbhwhQ33GguGL
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/elazarl/goproxy v1.4.0 h1:4GyuSbFa+s26+3rmYNSuUVsx+HgPrV1bk1jXI0l9wjM=
github.com/elazarl/goproxy v1.4.0/go.mod h1:X/5W/t+gzDyLfHW4DrMdpjqYjpXsURlBt9lpBDxZZZQ=
github.com/elazarl/goproxy v1.7.2 h1:Y2o6urb7Eule09PjlhQRGNsqRfPmYI3KKQLFpCAV3+o=
github.com/elazarl/goproxy v1.7.2/go.mod h1:82vkLNir0ALaW14Rc399OTTjyNREgmdL2cVoIbS6XaE=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.8 h1:FfZ3gj38NjllZIeJAmMhr+qKL8Wu+nOoI3GqacKw1NM=
github.com/gabriel-vasile/mimetype v1.4.8/go.mod h1:ByKUIKGjh1ODkGM1asKUbQZOLGrPjydw3hYPU2YU9t8=
github.com/gin-contrib/sse v1.0.0 h1:y3bT1mUWUxDpW4JLQg/HnTqV4rozuW4tC9eFKTxYI9E=
github.com/gin-contrib/sse v1.0.0/go.mod h1:zNuFdwarAygJBht0NTKiSi3jRf6RbqeILZ9Sp6Slhe0=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
github.com/gin-gonic/gin v1.10.0/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gabriel-vasile/mimetype v1.4.9 h1:5k+WDwEsD9eTLL8Tz3L0VnmVh9QxGjRmjBvAG7U/oYY=
github.com/gabriel-vasile/mimetype v1.4.9/go.mod h1:WnSQhFKJuBlRyLiKohA/2DtIlPFAbguNaG7QCHcyGok=
github.com/gin-contrib/sse v1.1.0 h1:n0w2GMuUpWDVp7qSpvze6fAu9iRxJY4Hmj6AmBOU05w=
github.com/gin-contrib/sse v1.1.0/go.mod h1:hxRZ5gVpWMT7Z0B0gSNYqqsSCNIJMjzvm6fqCz9vjwM=
github.com/gin-gonic/gin v1.10.1 h1:T0ujvqyCSqRopADpgPgiTT63DUQVSfojyME59Ei63pQ=
github.com/gin-gonic/gin v1.10.1/go.mod h1:4PMNQiOhvDRa013RKVbsiNwoyezlm2rm0uX/T7kzp5Y=
github.com/gliderlabs/ssh v0.3.8 h1:a4YXD1V7xMF9g5nTkdfnja3Sxy1PVDCj1Zg4Wb8vY6c=
github.com/gliderlabs/ssh v0.3.8/go.mod h1:xYoytBv1sV0aL3CavoDuJIQNURXkkfPA/wxQ1pL1fAU=
github.com/go-git/gcfg v1.5.1-0.20230307220236-3a3c6141e376 h1:+zs/tPmkDkHx3U66DAb0lQFJrpS6731Oaa12ikc+DiI=
@@ -66,11 +96,11 @@ github.com/go-git/go-billy/v5 v5.6.2 h1:6Q86EsPXMa7c3YZ3aLAQsMA0VlWmy43r6FHqa/UN
github.com/go-git/go-billy/v5 v5.6.2/go.mod h1:rcFC2rAsp/erv7CMz9GczHcuD0D32fWzH+MJAU+jaUU=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399 h1:eMje31YglSBqCdIqdhKBW8lokaMrL3uTkpGYlE2OOT4=
github.com/go-git/go-git-fixtures/v4 v4.3.2-0.20231010084843-55a94097c399/go.mod h1:1OCfN199q1Jm3HZlxleg+Dw/mwps2Wbk9frAWm+4FII=
github.com/go-git/go-git/v5 v5.13.2 h1:7O7xvsK7K+rZPKW6AQR1YyNhfywkv7B8/FsP3ki6Zv0=
github.com/go-git/go-git/v5 v5.13.2/go.mod h1:hWdW5P4YZRjmpGHwRH2v3zkWcNl6HeXaXQEMGb3NJ9A=
github.com/go-git/go-git/v5 v5.16.2 h1:fT6ZIOjE5iEnkzKyxTHK1W4HGAsPhqEqiSAssSO77hM=
github.com/go-git/go-git/v5 v5.16.2/go.mod h1:4Ge4alE/5gPs30F2H1esi2gPd69R0C39lolkucHBOp8=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY=
github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/logr v1.4.3 h1:CjnDlHq8ikf6E492q6eKboGOC0T8CDaOvkHCIg8idEI=
github.com/go-logr/logr v1.4.3/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-playground/assert/v2 v2.2.0 h1:JvknZsQTYeFEAhQwI4qEt9cyV5ONwRHC+lYKSsYSR8s=
@@ -79,8 +109,8 @@ github.com/go-playground/locales v0.14.1 h1:EWaQ/wswjilfKLTECiXz7Rh+3BjFhfDFKv/o
github.com/go-playground/locales v0.14.1/go.mod h1:hxrqLVvrK65+Rwrd5Fc6F2O76J/NuW9t0sjnWqG1slY=
github.com/go-playground/universal-translator v0.18.1 h1:Bcnm0ZwsGyWbCzImXv+pAJnYK9S473LQFuzCbDbfSFY=
github.com/go-playground/universal-translator v0.18.1/go.mod h1:xekY+UJKNuX9WP91TpwSH2VMlDf28Uj24BCp08ZFTUY=
github.com/go-playground/validator/v10 v10.25.0 h1:5Dh7cjvzR7BRZadnsVOzPhWsrwUr0nmsZJxEAnFLNO8=
github.com/go-playground/validator/v10 v10.25.0/go.mod h1:GGzBIJMuE98Ic/kJsBXbz1x/7cByt++cQ+YOuDM5wus=
github.com/go-playground/validator/v10 v10.26.0 h1:SP05Nqhjcvz81uJaRfEV0YBSSSGMc/iMaVtFbr3Sw2k=
github.com/go-playground/validator/v10 v10.26.0/go.mod h1:I5QpIEbmr8On7W0TktmJAumgzX4CA1XNl4ZmDuVHKKo=
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c h1:wpkoddUomPfHiOziHZixGO5ZBS73cKqVzZipfrLmO1w=
github.com/go-shiori/dom v0.0.0-20230515143342-73569d674e1c/go.mod h1:oVDCh3qjJMLVUSILBRwrm+Bc6RNXGZYtoh9xdvf1ffM=
github.com/go-shiori/go-readability v0.0.0-20250217085726-9f5bf5ca7612 h1:BYLNYdZaepitbZreRIa9xeCQZocWmy/wj4cGIH0qyw0=
@@ -93,8 +123,8 @@ github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8 h1:f+oWsMOmNPc8J
github.com/golang/groupcache v0.0.0-20241129210726-2c02b8208cf8/go.mod h1:wcDNUvekVysuuOpQKo3191zZyTpiI6se1N1ULghS0sw=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/generative-ai-go v0.19.0 h1:R71szggh8wHMCUlEMsW2A/3T+5LdEIkiaHSYgSpUgdg=
github.com/google/generative-ai-go v0.19.0/go.mod h1:JYolL13VG7j79kM5BtHz4qwONHkeJQzOCkKXnpqtS/E=
github.com/google/generative-ai-go v0.20.1 h1:6dEIujpgN2V0PgLhr6c/M1ynRdc7ARtiIDPFzj45uNQ=
github.com/google/generative-ai-go v0.20.1/go.mod h1:TjOnZJmZKzarWbjUJgy+r3Ee7HGBRVLhOIgupnwR4Bg=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8=
github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU=
@@ -103,10 +133,10 @@ github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/googleapis/enterprise-certificate-proxy v0.3.6 h1:GW/XbdyBFQ8Qe+YAmFU9uHLo7OnF5tL52HFAgMmyrf4=
github.com/googleapis/enterprise-certificate-proxy v0.3.6/go.mod h1:MkHOF77EYAE7qfSuSS9PU6g4Nt4e11cnsDUowfwewLA=
github.com/googleapis/gax-go/v2 v2.14.2 h1:eBLnkZ9635krYIPD+ag1USrOAI0Nr0QYF3+/3GqO0k0=
github.com/googleapis/gax-go/v2 v2.14.2/go.mod h1:ON64QhlJkhVtSqp4v1uaK92VyZ2gmvDQsweuyLV+8+w=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
@@ -138,16 +168,18 @@ github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/ollama/ollama v0.5.12 h1:qM+k/ozyHLJzEQoAEPrUQ0qXqsgDEEdpIVwuwScrd2U=
github.com/ollama/ollama v0.5.12/go.mod h1:ibdmDvb/TjKY1OArBWIazL3pd1DHTk8eG2MMjEkWhiI=
github.com/ollama/ollama v0.9.0 h1:GvdGhi8G/QMnFrY0TMLDy1bXua+Ify8KTkFe4ZY/OZs=
github.com/ollama/ollama v0.9.0/go.mod h1:aio9yQ7nc4uwIbn6S0LkGEPgn8/9bNQLL1nHuH+OcD0=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
github.com/onsi/gomega v1.34.1/go.mod h1:kU1QgUvBDLXBJq618Xvm2LUX6rSAfRaFRTcdOeDLwwY=
github.com/openai/openai-go v1.8.2 h1:UqSkJ1vCOPUpz9Ka5tS0324EJFEuOvMc+lA/EarJWP8=
github.com/openai/openai-go v1.8.2/go.mod h1:g461MYGXEXBVdV5SaR/5tNzNbSfwTBBefwc+LlDCK0Y=
github.com/otiai10/copy v1.14.1 h1:5/7E6qsUMBaH5AnQ0sSLzzTg1oTECmcCmT6lvF45Na8=
github.com/otiai10/copy v1.14.1/go.mod h1:oQwrEDDOci3IM8dJF0d8+jnbfPDllW6vUjNc3DoZm9I=
github.com/otiai10/mint v1.6.3 h1:87qsV/aw1F5as1eH1zS/yqHY85ANKVMgkDrf9rcxbQs=
github.com/otiai10/mint v1.6.3/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pelletier/go-toml/v2 v2.2.4 h1:mye9XuhQ6gvn5h28+VilKrrPoQVanw5PMw/TB0t5Ec4=
github.com/pelletier/go-toml/v2 v2.2.4/go.mod h1:2gIqNv+qfxSVS7cM2xJQKtLSTLUE9V8t9Stt+h56mCY=
github.com/pjbgf/sha1cd v0.3.2 h1:a9wb0bp1oC2TGwStyn0Umc/IGKQnEgF0vVaZ8QF8eo4=
github.com/pjbgf/sha1cd v0.3.2/go.mod h1:zQWigSxVmsHEZow5qaLtPYxpcKMMQpa09ixqBxuCS6A=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
@@ -155,31 +187,28 @@ github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINE
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/samber/lo v1.49.1 h1:4BIFyVfuQSEpluc7Fua+j1NolZHiEHEpaSEKdsH0tew=
github.com/samber/lo v1.49.1/go.mod h1:dO6KHFzUKXgP8LDhU0oI8d2hekjXnGOu0DB8Jecxd6o=
github.com/sashabaranov/go-openai v1.38.0 h1:hNN5uolKwdbpiqOn7l+Z2alch/0n0rSFyg4n+GZxR5k=
github.com/sashabaranov/go-openai v1.38.0/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adOgerWeFyZLUhAKRg=
github.com/rogpeppe/go-internal v1.14.1 h1:UQB4HGPB6osV0SQTLymcB4TgvyWu6ZyliaW0tI/otEQ=
github.com/rogpeppe/go-internal v1.14.1/go.mod h1:MaRKkUm5W0goXpeCfT7UZI6fk/L7L7so1lCWt35ZSgc=
github.com/samber/lo v1.50.0 h1:XrG0xOeHs+4FQ8gJR97zDz5uOFMW7OwFWiFVzqopKgY=
github.com/samber/lo v1.50.0/go.mod h1:RjZyNk6WSnUFRKK6EyOhsRJMqft3G+pg7dCWHQCWvsc=
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sgaunet/perplexity-go/v2 v2.8.0 h1:stnuVieniZMGo6qJLCV2JyR2uF7K5398YOA/ZZcgrSg=
github.com/sgaunet/perplexity-go/v2 v2.8.0/go.mod h1:MSks4RNuivCi0GqJyylhFdgSJFVEwZHjAhrf86Wkynk=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -194,29 +223,29 @@ github.com/tidwall/sjson v1.2.5 h1:kLy8mja+1c9jlljvWTlSazM7cKDRfJuR/bOJhcY5NcY=
github.com/tidwall/sjson v1.2.5/go.mod h1:Fvgq9kS/6ociJEDnK0Fk1cpYF4FIW6ZF7LAe+6jwd28=
github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS4MhqMhdFk5YI=
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/ugorji/go/codec v1.2.14 h1:yOQvXCBc3Ij46LRkRoh4Yd5qK6LVOgi0bYOXfb7ifjw=
github.com/ugorji/go/codec v1.2.14/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0 h1:rgMkmiGfix9vFJDcDi1PK8WEQP4FLQwLDfhp5ZLpFeE=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.59.0/go.mod h1:ijPqXp5P6IRRByFVVg9DY8P5HkxkHE5ARIa+86aXPf4=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0 h1:CV7UdSGJt/Ao6Gp4CXckLxVRRsRgDHoI8XjbL3PDl8s=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.59.0/go.mod h1:FRmFuRJfag1IZ2dPkHnEoSFVgTVPUd2qf5Vi69hLb8I=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
golang.org/x/arch v0.14.0 h1:z9JUEZWr8x4rR0OU6c4/4t6E6jOZ8/QBS2bBYBm4tx4=
golang.org/x/arch v0.14.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0 h1:q4XOmH/0opmeuJtPsbFNivyl7bCt7yRBbeEm2sC/XtQ=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.61.0/go.mod h1:snMWehoOh2wsEwnvvwtDyFCxVeDAODenXHtn5vzrKjo=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0 h1:F7Jx+6hwnZ41NSFTO5q4LYDtJRXBf2PD0rNBkeB/lus=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.61.0/go.mod h1:UHB22Z8QsdRDrnAtX4PntOl36ajSxcdUMt1sF7Y6E7Q=
go.opentelemetry.io/otel v1.36.0 h1:UumtzIklRBY6cI/lllNZlALOF5nNIzJVb16APdvgTXg=
go.opentelemetry.io/otel v1.36.0/go.mod h1:/TcFMXYjyRNh8khOAO9ybYkqaDBb/70aVwkNML4pP8E=
go.opentelemetry.io/otel/metric v1.36.0 h1:MoWPKVhQvJ+eeXWHFBOPoBOi20jh6Iq2CcCREuTYufE=
go.opentelemetry.io/otel/metric v1.36.0/go.mod h1:zC7Ks+yeyJt4xig9DEw9kuUFe5C3zLbVjV2PzT6qzbs=
go.opentelemetry.io/otel/sdk v1.36.0 h1:b6SYIuLRs88ztox4EyrvRti80uXIFy+Sqzoh9kFULbs=
go.opentelemetry.io/otel/sdk v1.36.0/go.mod h1:+lC+mTgD+MUWfjJubi2vvXWcVxyr9rmlshZni72pXeY=
go.opentelemetry.io/otel/sdk/metric v1.36.0 h1:r0ntwwGosWGaa0CrSt8cuNuTcccMXERFwHX4dThiPis=
go.opentelemetry.io/otel/sdk/metric v1.36.0/go.mod h1:qTNOhFDfKRwX0yXOqJYegL5WRaW376QbB7P4Pb0qva4=
go.opentelemetry.io/otel/trace v1.36.0 h1:ahxWNuqZjpdiFAyrIoQ4GIiAIhxAunQR6MUoKrsNd4w=
go.opentelemetry.io/otel/trace v1.36.0/go.mod h1:gQ+OnDZzrybY4k4seLzPAWNwVBBVlF2szhehOBB/tGA=
golang.org/x/arch v0.18.0 h1:WN9poc33zL4AzGxqf8VtpKUnGvMi8O9lhNyBMF/85qc=
golang.org/x/arch v0.18.0/go.mod h1:bdwinDaKcfZUGpH09BB7ZmOfhalA8lQdzl62l8gGWsk=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
@@ -224,17 +253,16 @@ golang.org/x/crypto v0.13.0/go.mod h1:y6Z2r+Rw4iayiXXAIxJIDAJ1zMW4yaTpebo8fPOliY
golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
golang.org/x/crypto v0.23.0/go.mod h1:CKFgDieR+mRhux2Lsu27y0fO304Db0wZe70UKqHu0v8=
golang.org/x/crypto v0.31.0/go.mod h1:kDsLvtWBEx7MV9tJOj9bnXsPbxwJQ6csT/x4KIN4Ssk=
golang.org/x/crypto v0.36.0 h1:AnAEvhDddvBdpY+uR+MyHmuZzzNqXSe/GvuDeob5L34=
golang.org/x/crypto v0.36.0/go.mod h1:Y4J0ReaxCR1IMaabaSMugxJES1EpwhBHhv2bDHklZvc=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/crypto v0.39.0 h1:SHs+kF4LP+f+p14esP5jAoDpHU8Gu/v9lFRK6IT5imM=
golang.org/x/crypto v0.39.0/go.mod h1:L+Xg3Wf6HoL4Bn4238Z6ft6KfEpN0tJGo53AAPC632U=
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa h1:t2QcU6V556bFjYgu4L6C+6VrCPyJZ+eyRsABUPs1mz4=
golang.org/x/exp v0.0.0-20250218142911-aa4b98e5adaa/go.mod h1:BHOTPb3L19zxehTsLoJXVaTktb06DFgmdW6Wb9s8jqk=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.12.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/mod v0.15.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
@@ -244,10 +272,10 @@ golang.org/x/net v0.15.0/go.mod h1:idbUs1IY1+zTqbi8yxTbhexhEEk5ur9LInksu6HrEpk=
golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
golang.org/x/net v0.25.0/go.mod h1:JkAGAh7GEvH74S6FOH42FLoXpXbE/aqXSrIQjXgsiwM=
golang.org/x/net v0.33.0/go.mod h1:HXLR5J+9DxmrqMwG9qjGCxZ+zKXxBru04zlTvWlWuN4=
golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8=
golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8=
golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M=
golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8=
golang.org/x/net v0.41.0 h1:vBTly1HeNPEn3wtREYfy4GZ/NECgw2Cnl+nK6Nz3uvw=
golang.org/x/net v0.41.0/go.mod h1:B/K4NNqkfmg07DQYrbwvSluqCJOOXwUjeb/5lOisjbA=
golang.org/x/oauth2 v0.30.0 h1:dnDm7JmhM45NNpd8FDDeLhK6FwqbOf4MLCM9zb1BOHI=
golang.org/x/oauth2 v0.30.0/go.mod h1:B++QgG3ZKulg6sRPGD/mqlHQs5rB3Ml9erfeDY7xKlU=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -255,8 +283,8 @@ golang.org/x/sync v0.3.0/go.mod h1:FU7BRWz2tNW+3quACPkgCx/L+uEAv1htQ0V83Z9Rj+Y=
golang.org/x/sync v0.6.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.7.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sync v0.12.0 h1:MHc5BpPuC30uJk597Ri8TV3CNZcTLu6B6z4lJy+g6Jw=
golang.org/x/sync v0.12.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sync v0.15.0 h1:KWH3jNZsfyT6xfAfKiz6MRNmd46ByHDYaZ7KSkCtdW8=
golang.org/x/sync v0.15.0/go.mod h1:1dzgHSNfp02xaA81J2MS99Qcpr2w7fw1gpm99rleRqA=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -273,8 +301,8 @@ golang.org/x/sys v0.12.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.20.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.28.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.31.0 h1:ioabZlmFYtWhL+TRYpcnNlLwhyxaM9kWTDEmfnprqik=
golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/sys v0.33.0 h1:q3i8TbbEz+JRD9ywIRlyRAQbM0qF7hu24q3teo2hbuw=
golang.org/x/sys v0.33.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k=
golang.org/x/telemetry v0.0.0-20240228155512-f48c80bd79b2/go.mod h1:TeRTkGYfJXctD9OcfyVLyj2J3IxLnKwHJR8f4D8a3YE=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
@@ -284,8 +312,8 @@ golang.org/x/term v0.12.0/go.mod h1:owVbMEjm3cBLCHdkQu9b1opXd4ETQWc3BhuQGKgXgvU=
golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
golang.org/x/term v0.20.0/go.mod h1:8UkIAJTvZgivsXaD6/pH6U9ecQzZ45awqEOzuCvwpFY=
golang.org/x/term v0.27.0/go.mod h1:iMsnZpn0cago0GOrHO2+Y7u7JPn5AylBrcoWkElMTSM=
golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y=
golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g=
golang.org/x/term v0.32.0 h1:DR4lr0TjUs3epypdhTOkMmuF5CDFJ/8pOnbzMZPQ7bg=
golang.org/x/term v0.32.0/go.mod h1:uZG1FhGx848Sqfsq4/DlJr3xGGsYMu/L5GW4abiaEPQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -296,10 +324,10 @@ golang.org/x/text v0.13.0/go.mod h1:TvPlkZtksWOMsz7fbANvkp4WM8x/WCo/om8BMLbz+aE=
golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.15.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY=
golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4=
golang.org/x/time v0.10.0 h1:3usCWA8tQn0L8+hFJQNgzpWbd89begxN66o1Ojdn5L4=
golang.org/x/time v0.10.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.26.0 h1:P42AVeLghgTYr4+xUnTRKDMqpar+PtX7KWuNQL21L8M=
golang.org/x/text v0.26.0/go.mod h1:QK15LZJUUQVJxhz7wXgxSy/CJaTFjd0G+YLonydOVQA=
golang.org/x/time v0.12.0 h1:ScB/8o8olJvc+CQPWrK3fPZNfh7qgwCrY0zJmoEQLSE=
golang.org/x/time v0.12.0/go.mod h1:CDIdPxbZBQxdj6cxyCIdrNogrJKMJ7pr37NYpMcMDSg=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
@@ -307,16 +335,18 @@ golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.13.0/go.mod h1:HvlwmtVNQAhOuCjW7xxvovg8wbNq7LwfXh/k7wXUl58=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.223.0 h1:JUTaWEriXmEy5AhvdMgksGGPEFsYfUKaPEYXd4c3Wvc=
google.golang.org/api v0.223.0/go.mod h1:C+RS7Z+dDwds2b+zoAk5hN/eSfsiCn0UDrYof/M4d2M=
google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99 h1:ilJhrCga0AptpJZXmUYG4MCrx/zf3l1okuYz7YK9PPw=
google.golang.org/genproto/googleapis/api v0.0.0-20250224174004-546df14abb99/go.mod h1:Xsh8gBVxGCcbV8ZeTB9wI5XPyZ5RvC6V3CTeeplHbiA=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99 h1:ZSlhAUqC4r8TPzqLXQ0m3upBNZeF+Y8jQ3c4CR3Ujms=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250224174004-546df14abb99/go.mod h1:LuRYeWDFV6WOn90g357N17oMCaxpgCnbi/44qJvDn2I=
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM=
google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
google.golang.org/api v0.236.0 h1:CAiEiDVtO4D/Qja2IA9VzlFrgPnK3XVMmRoJZlSWbc0=
google.golang.org/api v0.236.0/go.mod h1:X1WF9CU2oTc+Jml1tiIxGmWFK/UZezdqEu09gcxZAj4=
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2 h1:1tXaIXCracvtsRxSBsYDiSBN0cuJvM7QYW+MrpIRY78=
google.golang.org/genproto v0.0.0-20250505200425-f936aa4a68b2/go.mod h1:49MsLSx0oWMOZqcpB3uL8ZOkAh1+TndpJ8ONoCBWiZk=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822 h1:oWVWY3NzT7KJppx2UKhKmzPq4SRe0LdCijVRwvGeikY=
google.golang.org/genproto/googleapis/api v0.0.0-20250603155806-513f23925822/go.mod h1:h3c4v36UTKzUiuaOKQ6gr3S+0hovBtUrXzTG/i3+XEc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822 h1:fc6jSaCT0vBduLYZHYrBBNY4dsWuvgyff9noRNDdBeE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250603155806-513f23925822/go.mod h1:qQ0YXyHHx3XkvlzUtpXDkS29lDSafHMZBAZDc03LQ3A=
google.golang.org/grpc v1.73.0 h1:VIWSmpI2MegBtTuFt5/JWy2oXxtjJ/e89Z70ImfD2ok=
google.golang.org/grpc v1.73.0/go.mod h1:50sbHOUqWoCQGI8V2HQLJM0B+LMlIUjNSZmow7EVBQc=
google.golang.org/protobuf v1.36.6 h1:z1NpPI8ku2WgiWnf+t9wTPsn6eP1L7ksHUlkfLvd9xY=
google.golang.org/protobuf v1.36.6/go.mod h1:jduwjTPXsFjZGTmRluh+L6NjiWu7pchiJ2/5YcXBHnY=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -324,7 +354,6 @@ gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EV
gopkg.in/warnings.v0 v0.1.2 h1:wFXVbFY8DY5/xOe1ECiWdKCzZlxgshcYVNkBHstARME=
gopkg.in/warnings.v0 v0.1.2/go.mod h1:jksf8JmL6Qr/oQM2OXTHunEvvTAsrWBLb6OOjuVWRNI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=

View File

@@ -1,6 +1,8 @@
{
lib,
buildGoApplication,
go,
installShellFiles,
}:
buildGoApplication {
@@ -17,6 +19,15 @@ buildGoApplication {
"-w"
];
inherit go;
nativeBuildInputs = [ installShellFiles ];
postInstall = ''
installShellCompletion --zsh ./completions/_fabric
installShellCompletion --bash ./completions/fabric.bash
installShellCompletion --fish ./completions/fabric.fish
'';
meta = with lib; {
description = "Fabric is an open-source framework for augmenting humans using AI. It provides a modular framework for solving specific problems using a crowdsourced set of AI prompts that can be used anywhere";
homepage = "https://github.com/danielmiessler/fabric";

View File

@@ -2,56 +2,101 @@ schema = 3
[mod]
[mod."cloud.google.com/go"]
version = "v0.118.3"
hash = "sha256-y3YHioDLx9/asf2AWuincnq4BVO2S/GQFxpa1dEpxKs="
version = "v0.121.2"
hash = "sha256-BCgGHxKti8slH98UDDurtgzX3lgcYEklsmj4ImPpwlc="
[mod."cloud.google.com/go/ai"]
version = "v0.10.0"
hash = "sha256-huE2q1HBA6d9FQ152HFQhOe9fX0QlLFVuFO3XAfln8U="
version = "v0.12.1"
hash = "sha256-wg3oLMS68E/v7EdNzywbjwEmpk+u6U8LTnIc1pq8edo="
[mod."cloud.google.com/go/auth"]
version = "v0.15.0"
hash = "sha256-N9xjLPDLhG5cqUx94tNccv74Q/fIlukWU6NbWpuNi+I="
version = "v0.16.2"
hash = "sha256-BAU9WGFKe0pd5Eu3l/Mbts+QeCOjS+lChr5hrPBCzdA="
[mod."cloud.google.com/go/auth/oauth2adapt"]
version = "v0.2.7"
hash = "sha256-U+pXaY0kPnSeBzHWxELZ75bZnb74nygwIVZDdXYcP5g="
version = "v0.2.8"
hash = "sha256-GoXFqAbp1WO1tDj07PF5EyxDYvCBP0l0qwxY2oV2hfc="
[mod."cloud.google.com/go/compute/metadata"]
version = "v0.6.0"
hash = "sha256-E8/cwio4xR8buCryR4HwR7+agb4M3zqgXSm7rBglmIY="
version = "v0.7.0"
hash = "sha256-jJZDW+hibqjMiY8OiJhgJALbGwEq+djLOxfYR7upQyE="
[mod."cloud.google.com/go/longrunning"]
version = "v0.6.4"
hash = "sha256-Q0JtsyxSgVwi91ZhvefpAq8fKbblRrtQ2bQhQYiTY48="
version = "v0.6.7"
hash = "sha256-9I0Nc2KWAEVoxDngNkqFUdASmZIAySfMEELlPh3Q3xA="
[mod."dario.cat/mergo"]
version = "v1.0.1"
hash = "sha256-wcG6+x0k6KzOSlaPA+1RFxa06/RIAePJTAjjuhLbImw="
version = "v1.0.2"
hash = "sha256-p6jdiHlLEfZES8vJnDywG4aVzIe16p0CU6iglglIweA="
[mod."github.com/Microsoft/go-winio"]
version = "v0.6.2"
hash = "sha256-tVNWDUMILZbJvarcl/E7tpSnkn7urqgSHa2Eaka5vSU="
[mod."github.com/ProtonMail/go-crypto"]
version = "v1.1.5"
hash = "sha256-N5Zn0f/NF3ezyGou2kRw9BwM25feJqnp7TPkRt6oK6I="
[mod."github.com/anaskhan96/soup"]
version = "v1.2.5"
hash = "sha256-t8yCyK2y7x2qaI/3Yw16q3zVFqu+3acLcPgTr1MIKWg="
version = "v1.3.0"
hash = "sha256-TUG+C4MyeWglOmiwiW2/NUVurFHXLgEPRd3X9uQ1NGI="
[mod."github.com/andybalholm/cascadia"]
version = "v1.3.3"
hash = "sha256-jv7ZshpSd7FZzKKN6hqlUgiR8C3y85zNIS/hq7g76Ho="
[mod."github.com/anthropics/anthropic-sdk-go"]
version = "v0.2.0-alpha.11"
hash = "sha256-0wl62e6AVhDY3KkoYrfAHFtBrwNC4nzqrR55iyCJlwk="
version = "v1.4.0"
hash = "sha256-4kwFw9gt/sRIlTo0fC2PbfLnCyc4lCOtmfQelhpORX8="
[mod."github.com/araddon/dateparse"]
version = "v0.0.0-20210429162001-6b43995a97de"
hash = "sha256-UuX84naeRGMsFOgIgRoBHG5sNy1CzBkWPKmd6VbLwFw="
[mod."github.com/atotto/clipboard"]
version = "v0.1.4"
hash = "sha256-ZZ7U5X0gWOu8zcjZcWbcpzGOGdycwq0TjTFh/eZHjXk="
[mod."github.com/aws/aws-sdk-go-v2"]
version = "v1.36.4"
hash = "sha256-Cpdphp8FQUbQlhAYvtPKDh1oZc84+/0bzLlx8CM1/BM="
[mod."github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream"]
version = "v1.6.10"
hash = "sha256-9+ZMhWxtsm7ZtZCjBV5PZkOR5rt3bCOznuv45Iwf55c="
[mod."github.com/aws/aws-sdk-go-v2/config"]
version = "v1.27.27"
hash = "sha256-jQmc1lJmVeTezSeFs6KL2HAvCkP9ZWMdVbG5ymJQrKs="
[mod."github.com/aws/aws-sdk-go-v2/credentials"]
version = "v1.17.27"
hash = "sha256-7ITZjIF0ZmmCG3u5d88IfsAj0KF1IFm9KhWFlC6RtQo="
[mod."github.com/aws/aws-sdk-go-v2/feature/ec2/imds"]
version = "v1.16.11"
hash = "sha256-uedtRd/SIcFJlYZg1jtJdIJViZq1Poks9/J2Bm9/Ehw="
[mod."github.com/aws/aws-sdk-go-v2/internal/configsources"]
version = "v1.3.35"
hash = "sha256-AyQ+eJvyhahypIAqPScdkn44MYwBcr9iyrMC1BRSeZI="
[mod."github.com/aws/aws-sdk-go-v2/internal/endpoints/v2"]
version = "v2.6.35"
hash = "sha256-c8K+Nk5XrFMWaaxVsyhKgyJBZhs3Hkhjr/dIDXWZfSQ="
[mod."github.com/aws/aws-sdk-go-v2/internal/ini"]
version = "v1.8.0"
hash = "sha256-v76jTAr4rEgS5en49ikLh6nuvclN+VjpOPj83ZQ3sLo="
[mod."github.com/aws/aws-sdk-go-v2/service/bedrock"]
version = "v1.34.1"
hash = "sha256-OK7t+ieq4pviCnnhfSytANBF5Lwdz4KxjN10CC5pXyY="
[mod."github.com/aws/aws-sdk-go-v2/service/bedrockruntime"]
version = "v1.30.0"
hash = "sha256-MsEQfbqIREtMikRFqBpLCqdAC4gfgPSNbk08k5OJTbo="
[mod."github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding"]
version = "v1.11.3"
hash = "sha256-TRhoRd7iY7K+pfdkSQLItyr52k2jO4TMYQ5vRGiOOMk="
[mod."github.com/aws/aws-sdk-go-v2/service/internal/presigned-url"]
version = "v1.11.17"
hash = "sha256-eUoYDAXcQNzCmwjXO9RWhrt0jGYlSjt2vQOlAlpIfoE="
[mod."github.com/aws/aws-sdk-go-v2/service/sso"]
version = "v1.22.4"
hash = "sha256-Q3tyDdJVq0BAstOYvCKPvNS4EHkhXt1pL/23KPQJMHM="
[mod."github.com/aws/aws-sdk-go-v2/service/ssooidc"]
version = "v1.26.4"
hash = "sha256-cPv6nmVPOjMUZjN2IeEiYQSzLeAOrfgGnSSvvhJ6iL4="
[mod."github.com/aws/aws-sdk-go-v2/service/sts"]
version = "v1.30.3"
hash = "sha256-4z/K4GPW9osiNM3SxFNZYsVPnSSU50Iuv29Sb2n4Fbk="
[mod."github.com/aws/smithy-go"]
version = "v1.22.2"
hash = "sha256-YdwVeW509cpqU357MjDM8ReL1vftkW8XIhSbJsbTh/s="
[mod."github.com/bytedance/sonic"]
version = "v1.12.9"
hash = "sha256-smlXGC4n6fkOiVR+A3VGd71xp+cYo42MSHuWq7H3jew="
version = "v1.13.3"
hash = "sha256-Nnt5b2NkIvSXhGERQmyI0ka28hbWi7A7Zn3dsAjPcEA="
[mod."github.com/bytedance/sonic/loader"]
version = "v0.2.3"
hash = "sha256-c0m1nl1jv76LVaUgFFNjZU9jss/hoSWXyCRimhRWYjM="
version = "v0.2.4"
hash = "sha256-rv9LnePpm4OspSVbfSoVbohXzhu+dxE1BH1gm3mTmTc="
[mod."github.com/cloudflare/circl"]
version = "v1.6.0"
hash = "sha256-a+SVfnHYC8Fb+NQLboNg5P9sry+WutzuNetVHFVAAo0="
version = "v1.6.1"
hash = "sha256-Dc69V12eIFnJoUNmwg6VKXHfAMijbAeEVSDe8AiOaLo="
[mod."github.com/cloudwego/base64x"]
version = "v0.1.5"
hash = "sha256-MyUYTveN48DhnL8mwAgCRuMExLct98uzSPsmYlfaa4I="
@@ -68,14 +113,14 @@ schema = 3
version = "v1.0.4"
hash = "sha256-c1JKoRSndwwOyOxq9ddCe+8qn7mG9uRq2o/822x5O/c="
[mod."github.com/gabriel-vasile/mimetype"]
version = "v1.4.8"
hash = "sha256-ElqfQtnoGHyVqtN0mJjeWakQ6N5x+nVaX3+uOV7Q5Xk="
version = "v1.4.9"
hash = "sha256-75uELLqb01djHTe7KdXvUidBK7SuejarYouEUuxaj8Q="
[mod."github.com/gin-contrib/sse"]
version = "v1.0.0"
hash = "sha256-xnaabOxDN+ojnHQC7mHd/876Z9nWFScW+JrMm1HWREw="
version = "v1.1.0"
hash = "sha256-2VP6zHEsPi0u2ZYpOTcLulwj1Gsmb6oA19qcP2/AzVM="
[mod."github.com/gin-gonic/gin"]
version = "v1.10.0"
hash = "sha256-esJasHrJtuTBwGPGAoc/XSb428J8va+tPGcZ0gTfsgc="
version = "v1.10.1"
hash = "sha256-D98+chAdjb6JcLPkscOr8TgTW87UqA4h3cnY0XIr16c="
[mod."github.com/go-git/gcfg"]
version = "v1.5.1-0.20230307220236-3a3c6141e376"
hash = "sha256-f4k0gSYuo0/q3WOoTxl2eFaj7WZpdz29ih6CKc8Ude8="
@@ -83,11 +128,11 @@ schema = 3
version = "v5.6.2"
hash = "sha256-VgbxcLkHjiSyRIfKS7E9Sn8OynCrMGUDkwFz6K2TVL4="
[mod."github.com/go-git/go-git/v5"]
version = "v5.13.2"
hash = "sha256-voZQHN2OSYcoQF2bIjsdRrHT5NohZ/8q9RrmY7j2Lbc="
version = "v5.16.2"
hash = "sha256-KdOf4KwJAJUIB/EcQH6wc7jpcABCISWur3vOTpAo+/c="
[mod."github.com/go-logr/logr"]
version = "v1.4.2"
hash = "sha256-/W6qGilFlZNTb9Uq48xGZ4IbsVeSwJiAMLw4wiNYHLI="
version = "v1.4.3"
hash = "sha256-Nnp/dEVNMxLp3RSPDHZzGbI8BkSNuZMX0I0cjWKXXLA="
[mod."github.com/go-logr/stdr"]
version = "v1.2.2"
hash = "sha256-rRweAP7XIb4egtT1f2gkz4sYOu7LDHmcJ5iNsJUd0sE="
@@ -98,8 +143,8 @@ schema = 3
version = "v0.18.1"
hash = "sha256-2/B2qP51zfiY+k8G0w0D03KXUc7XpWj6wKY7NjNP/9E="
[mod."github.com/go-playground/validator/v10"]
version = "v10.25.0"
hash = "sha256-198CQ0f+WC7UNxCCPg6rpogez6c5ivpignJNhx+z0W4="
version = "v10.26.0"
hash = "sha256-/jMKICp8LTcJVt+b4YRTnJM84r7HK6aT0oqO7Q8SRs8="
[mod."github.com/go-shiori/dom"]
version = "v0.0.0-20230515143342-73569d674e1c"
hash = "sha256-4lm9KZfR2XnfZU9KTG+4jqLYZqbfL74AMO4y3dKpIbg="
@@ -116,8 +161,8 @@ schema = 3
version = "v0.0.0-20241129210726-2c02b8208cf8"
hash = "sha256-AdLZ3dJLe/yduoNvZiXugZxNfmwJjNQyQGsIdzYzH74="
[mod."github.com/google/generative-ai-go"]
version = "v0.19.0"
hash = "sha256-x2K1nkRwtne9MeP5B8FpwavYqQx564go5LzmcBJ0KT4="
version = "v0.20.1"
hash = "sha256-9bSpEs4kByhgyTKiHdOY5muYjGBTluA1LvEjw2gSoLI="
[mod."github.com/google/s2a-go"]
version = "v0.1.9"
hash = "sha256-0AdSpSTso4bATmM/9qamWzKrVtOLDf7afvDhoiT/UpA="
@@ -125,11 +170,11 @@ schema = 3
version = "v1.6.0"
hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
[mod."github.com/googleapis/enterprise-certificate-proxy"]
version = "v0.3.4"
hash = "sha256-RVHWa0I68CTegjlXnM/GlishoZhmmwG4z+9KBucAJ1A="
version = "v0.3.6"
hash = "sha256-hPMF0s+X4/ul98GvVuw/ZNOupEXhIDB1yvWymZWYEbU="
[mod."github.com/googleapis/gax-go/v2"]
version = "v2.14.1"
hash = "sha256-iRS/KsAVTePrvTlwA7vKcQnwY6Jz329WdgzFw0hF8wk="
version = "v2.14.2"
hash = "sha256-QyY7wuCkrOJCJIf9Q884KD/BC3vk/QtQLXeLeNPt750="
[mod."github.com/jbenet/go-context"]
version = "v0.0.0-20150711004518-d14ea06fba99"
hash = "sha256-VANNCWNNpARH/ILQV9sCQsBWgyL2iFT+4AHZREpxIWE="
@@ -161,8 +206,11 @@ schema = 3
version = "v1.0.2"
hash = "sha256-+W9EIW7okXIXjWEgOaMh58eLvBZ7OshW2EhaIpNLSBU="
[mod."github.com/ollama/ollama"]
version = "v0.5.12"
hash = "sha256-Uf4GZdD77RZ5KJtz3iYVRDVCHqEh0UEihzquO4/nrss="
version = "v0.9.0"
hash = "sha256-r2eU+kMG3tuJy2B43RXsfmeltzM9t05NEmNiJAW5qr4="
[mod."github.com/openai/openai-go"]
version = "v1.8.2"
hash = "sha256-O8aV3zEj6o8kIlzlkYaTW4RzvwR3qNUBYiN8SuTM1R0="
[mod."github.com/otiai10/copy"]
version = "v1.14.1"
hash = "sha256-8RR7u17SbYg9AeBXVHIv5ZMU+kHmOcx0rLUKyz6YtU0="
@@ -170,8 +218,8 @@ schema = 3
version = "v1.6.3"
hash = "sha256-/FT3dYP2+UiW/qe1pxQ7HiS8et4+KHGPIMhc+8mHvzw="
[mod."github.com/pelletier/go-toml/v2"]
version = "v2.2.3"
hash = "sha256-fE++SVgnCGdnFZoROHWuYjIR7ENl7k9KKxQrRTquv/o="
version = "v2.2.4"
hash = "sha256-8qQIPldbsS5RO8v/FW/se3ZsAyvLzexiivzJCbGRg2Q="
[mod."github.com/pjbgf/sha1cd"]
version = "v0.3.2"
hash = "sha256-jdbiRhU8xc1C5c8m7BSCj71PUXHY3f7TWFfxDKKpUMk="
@@ -182,14 +230,14 @@ schema = 3
version = "v1.0.0"
hash = "sha256-/FtmHnaGjdvEIKAJtrUfEhV7EVo5A/eYrtdnUkuxLDA="
[mod."github.com/samber/lo"]
version = "v1.49.1"
hash = "sha256-xMQS9Sx2Bpvwo/9JvSVkJ4RXYOSHm642WRqWA6y0AnU="
[mod."github.com/sashabaranov/go-openai"]
version = "v1.38.0"
hash = "sha256-p6C/7oTWgnRjZLNrLLdIzaXvm+1WCrUd1fjZkjuiz1s="
version = "v1.50.0"
hash = "sha256-KDFks82BKu39sGt0f972IyOkohV2U0r1YvsnlNLdugY="
[mod."github.com/sergi/go-diff"]
version = "v1.3.2-0.20230802210424-5b0b94c5c0d3"
hash = "sha256-UcLU83CPMbSoKI8RLvLJ7nvGaE2xRSL1RjoHCVkMzUM="
version = "v1.4.0"
hash = "sha256-rs9NKpv/qcQEMRg7CmxGdP4HGuFdBxlpWf9LbA9wS4k="
[mod."github.com/sgaunet/perplexity-go/v2"]
version = "v2.8.0"
hash = "sha256-w1S14Jf4/6LFODREmmiJvPtkZh4Sor81Rr1PqC5pIak="
[mod."github.com/skeema/knownhosts"]
version = "v1.3.1"
hash = "sha256-kjqQDzuncQNTuOYegqVZExwuOt/Z73m2ST7NZFEKixI="
@@ -212,8 +260,8 @@ schema = 3
version = "v0.15.1"
hash = "sha256-HLk6oUe7EoITrNvP0y8D6BtIgIcmDZYtb/xl/dufIoY="
[mod."github.com/ugorji/go/codec"]
version = "v1.2.12"
hash = "sha256-sp1LJ93UK7mFwgZqG8jxCgTCPgKR74HNU6XxX0Jfjm0="
version = "v1.2.14"
hash = "sha256-PoVXlCBE8SvMWpXx9FRsQOSAmE/+5SnPGr4m5BGoyIo="
[mod."github.com/xanzy/ssh-agent"]
version = "v0.3.3"
hash = "sha256-l3pGB6IdzcPA/HLk93sSN6NM2pKPy+bVOoacR5RC2+c="
@@ -221,65 +269,62 @@ schema = 3
version = "v1.1.0"
hash = "sha256-cA9qCCu8P1NSJRxgmpfkfa5rKyn9X+Y/9FSmSd5xjyo="
[mod."go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"]
version = "v0.59.0"
hash = "sha256-jItb6nG5/urw6Pv3zb8i5ywianqTQfrheyAIsPIQcnY="
version = "v0.61.0"
hash = "sha256-o5w9k3VbqP3gaXI3Aelw93LLHH53U4PnkYVwc3MaY3Y="
[mod."go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"]
version = "v0.59.0"
hash = "sha256-zeC30S2MV7W2xxS5rVfPGhZO4jcdPpxxfy3QvBkt/pQ="
version = "v0.61.0"
hash = "sha256-4pfXD7ErXhexSynXiEEQSAkWoPwHd7PEDE3M1Zi5gLM="
[mod."go.opentelemetry.io/otel"]
version = "v1.34.0"
hash = "sha256-hnuuTSxaf9yMO/23xWdcTGNzvnnJiqUiL4nzYwUV5bc="
version = "v1.36.0"
hash = "sha256-j8wojdCtKal3LKojanHA8KXXQ0FkbWONpO8tUxpJDko="
[mod."go.opentelemetry.io/otel/metric"]
version = "v1.34.0"
hash = "sha256-JklGKJiMf1fpsE9pmnuLUq26g6wVp173v4GWJ7Xp5s4="
version = "v1.36.0"
hash = "sha256-z6Uqi4HhUljWIYd58svKK5MqcGbpcac+/M8JeTrUtJ8="
[mod."go.opentelemetry.io/otel/trace"]
version = "v1.34.0"
hash = "sha256-u11KJ4WTDtcb0tVv7d/HOdhq8Ea+c1QPBO8MbsCQu9Q="
version = "v1.36.0"
hash = "sha256-owWD9x1lp8aIJqYt058BXPUsIMHdk3RI0escso0BxwA="
[mod."golang.org/x/arch"]
version = "v0.14.0"
hash = "sha256-9akWthLBB+Au/JIg3WKcSx1YAfHEHOCnQF62sJoMJG4="
version = "v0.18.0"
hash = "sha256-tUpUPERjmRi7zldj0oPlnbnBhEkcI9iQGvP1HqlsK10="
[mod."golang.org/x/crypto"]
version = "v0.36.0"
hash = "sha256-Np+suvZdMOXALDm4m8nDNa+QsvUV0rE0PEINuxCoKGM="
version = "v0.39.0"
hash = "sha256-FtwjbVoAhZkx7F2hmzi9Y0J87CVVhWcrZzun+zWQLzc="
[mod."golang.org/x/net"]
version = "v0.38.0"
hash = "sha256-iHmyxkZQLw1PsUZaXHMt84GrjbXPvkEc92sLoK3W++c="
version = "v0.41.0"
hash = "sha256-6/pi8rNmGvBFzkJQXkXkMfL1Bjydhg3BgAMYDyQ/Uvg="
[mod."golang.org/x/oauth2"]
version = "v0.27.0"
hash = "sha256-TBKV2c/m0SgPqrJSE0ltJXfImrYPafNuziLN25jgsYY="
version = "v0.30.0"
hash = "sha256-btD7BUtQpOswusZY5qIU90uDo38buVrQ0tmmQ8qNHDg="
[mod."golang.org/x/sync"]
version = "v0.12.0"
hash = "sha256-lPIbI6aXx+iKtFjPaXKNLEnuNfhHCVl7EQiE7alUvlM="
version = "v0.15.0"
hash = "sha256-Jf4ehm8H8YAWY6mM151RI5CbG7JcOFtmN0AZx4bE3UE="
[mod."golang.org/x/sys"]
version = "v0.31.0"
hash = "sha256-aulv5obCrhheMlSq7seUgP3C29nfZABwiQ4IBNisgME="
version = "v0.33.0"
hash = "sha256-wlOzIOUgAiGAtdzhW/KPl/yUVSH/lvFZfs5XOuJ9LOQ="
[mod."golang.org/x/text"]
version = "v0.23.0"
hash = "sha256-TiYX1K4DYpP1dEV06whOm43xyOntjrPFi+VAdncoeCY="
version = "v0.26.0"
hash = "sha256-N+27nBCyGvje0yCTlUzZoVZ0LRxx4AJ+eBlrFQVRlFQ="
[mod."golang.org/x/time"]
version = "v0.10.0"
hash = "sha256-vnlAME3gDR6R4cbCmSYAlR1Rjc0yUpkufTOPNvCdf6Q="
version = "v0.12.0"
hash = "sha256-Cp3oxrCMH2wyxjzr5SHVmyhgaoUuSl56Uy00Q7DYEpw="
[mod."google.golang.org/api"]
version = "v0.223.0"
hash = "sha256-sNLRocS4vcjPj0vsInI/ioZ29rSVdGD0bGz8ZzBSbus="
version = "v0.236.0"
hash = "sha256-tP1RSUSnQ4a0axgZQwEZgKF1E13nL02FSP1NPSZr0Rc="
[mod."google.golang.org/genproto/googleapis/api"]
version = "v0.0.0-20250224174004-546df14abb99"
hash = "sha256-8er5KyVDLmuuOZEDd8cHHTkpb/JifejdHwcHfqAD83o="
version = "v0.0.0-20250603155806-513f23925822"
hash = "sha256-0CS432v9zVhkVLqFpZtxBX8rvVqP67lb7qQ3es7RqIU="
[mod."google.golang.org/genproto/googleapis/rpc"]
version = "v0.0.0-20250224174004-546df14abb99"
hash = "sha256-l/2ByVhr10DBqSp5y1d8mtEY3++RUZKg89FCEptT0nQ="
version = "v0.0.0-20250603155806-513f23925822"
hash = "sha256-WK7iDtAhH19NPe3TywTQlGjDawNaDKWnxhFL9PgVUwM="
[mod."google.golang.org/grpc"]
version = "v1.70.0"
hash = "sha256-7SCJx6Y35O/0P3cFtELDXrOSOb+HshxaTQYdzv2gVmg="
version = "v1.73.0"
hash = "sha256-LfVlwip++q2DX70RU6CxoXglx1+r5l48DwlFD05G11c="
[mod."google.golang.org/protobuf"]
version = "v1.36.5"
hash = "sha256-isupBiQUrKPEFzK94k5cgzM3Ab5fMXp352/zcsXV1JU="
version = "v1.36.6"
hash = "sha256-lT5qnefI5FDJnowz9PEkAGylH3+fE+A3DJDkAyy9RMc="
[mod."gopkg.in/warnings.v0"]
version = "v0.1.2"
hash = "sha256-ATVL9yEmgYbkJ1DkltDGRn/auGAjqGOfjQyBYyUo8s8="
[mod."gopkg.in/yaml.v2"]
version = "v2.4.0"
hash = "sha256-uVEGglIedjOIGZzHW4YwN1VoRSTK8o0eGZqzd+TNdd0="
[mod."gopkg.in/yaml.v3"]
version = "v3.0.1"
hash = "sha256-FqL9TKYJ0XkNwJFnq9j0VvJ5ZUU1RvH/52h/f5bkYAU="

View File

@@ -1 +1 @@
"1.4.179"
"1.4.238"

View File

@@ -2,12 +2,13 @@
pkgs,
gomod2nix,
goEnv,
goVersion,
}:
{
default = pkgs.mkShell {
nativeBuildInputs = [
pkgs.go
goVersion
pkgs.gopls
pkgs.gotools
pkgs.go-tools

View File

@@ -22,19 +22,20 @@ Take a deep breath and think step by step about how to best accomplish this goal
This must be under the heading "INSIGHTFULNESS SCORE (0 = not very interesting and insightful to 10 = very interesting and insightful)".
- A rating of how emotional the debate was from 0 (very calm) to 5 (very emotional). This must be under the heading "EMOTIONALITY SCORE (0 (very calm) to 5 (very emotional))".
- A list of the participants of the debate and a score of their emotionality from 0 (very calm) to 5 (very emotional). This must be under the heading "PARTICIPANTS".
- A list of arguments attributed to participants with names and quotes. If possible, this should include external references that disprove or back up their claims.
- A list of arguments attributed to participants with names and quotes. Each argument summary must be EXACTLY 16 words. If possible, this should include external references that disprove or back up their claims.
It is IMPORTANT that these references are from trusted and verifiable sources that can be easily accessed. These sources have to BE REAL and NOT MADE UP. This must be under the heading "ARGUMENTS".
If possible, provide an objective assessment of the truth of these arguments. If you assess the truth of the argument, provide some sources that back up your assessment. The material you provide should be from reliable, verifiable, and trustworthy sources. DO NOT MAKE UP SOURCES.
- A list of agreements the participants have reached, attributed with names and quotes. This must be under the heading "AGREEMENTS".
- A list of disagreements the participants were unable to resolve and the reasons why they remained unresolved, attributed with names and quotes. This must be under the heading "DISAGREEMENTS".
- A list of possible misunderstandings and why they may have occurred, attributed with names and quotes. This must be under the heading "POSSIBLE MISUNDERSTANDINGS".
- A list of learnings from the debate. This must be under the heading "LEARNINGS".
- A list of takeaways that highlight ideas to think about, sources to explore, and actionable items. This must be under the heading "TAKEAWAYS".
- A list of agreements the participants have reached. Each agreement summary must be EXACTLY 16 words, followed by names and quotes. This must be under the heading "AGREEMENTS".
- A list of disagreements the participants were unable to resolve. Each disagreement summary must be EXACTLY 16 words, followed by names and quotes explaining why they remained unresolved. This must be under the heading "DISAGREEMENTS".
- A list of possible misunderstandings. Each misunderstanding summary must be EXACTLY 16 words, followed by names and quotes explaining why they may have occurred. This must be under the heading "POSSIBLE MISUNDERSTANDINGS".
- A list of learnings from the debate. Each learning must be EXACTLY 16 words. This must be under the heading "LEARNINGS".
- A list of takeaways that highlight ideas to think about, sources to explore, and actionable items. Each takeaway must be EXACTLY 16 words. This must be under the heading "TAKEAWAYS".
# OUTPUT INSTRUCTIONS
- Output all sections above.
- Use Markdown to structure your output.
- Do not use any markdown formatting (no asterisks, no bullet points, no headers).
- Keep all agreements, arguments, recommendations, learnings, and takeaways to EXACTLY 16 words each.
- When providing quotes, these quotes should clearly express the points you are using them for. If necessary, use multiple quotes.
# INPUT:
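The repeated EXACTLY 16 words constraint is mechanical enough to verify in code. Here is a minimal Go sketch of such a check, assuming whitespace-delimited words; the function name and the sample sentence are illustrative only and not part of the pattern or the fabric codebase.

```go
package main

import (
	"fmt"
	"strings"
)

// isExactly16Words reports whether a line satisfies the pattern's
// EXACTLY 16 words rule, counting whitespace-separated tokens.
func isExactly16Words(line string) bool {
	return len(strings.Fields(line)) == 16
}

func main() {
	// A 16-word sample summary (hypothetical, for illustration).
	summary := "Participants agreed that renewable subsidies accelerate adoption but disagreed sharply on funding mechanisms and timelines overall."
	fmt.Println(isExactly16Words(summary)) // true
}
```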

View File

@@ -8,19 +8,19 @@ Take a deep breath and think step by step about how to best accomplish this goal
- Consume the entire paper and think deeply about it.
- Map out all the claims and implications on a virtual whiteboard in your mind.
- Map out all the claims and implications on a giant virtual whiteboard in your mind.
# OUTPUT
- Extract a summary of the paper and its conclusions into a 25-word sentence called SUMMARY.
- Extract a summary of the paper and its conclusions into a 16-word sentence called SUMMARY.
- Extract the list of authors in a section called AUTHORS.
- Extract the list of organizations the authors are associated with, e.g., which university they're at, in a section called AUTHOR ORGANIZATIONS.
- Extract the primary paper findings into a bulleted list of no more than 16 words per bullet into a section called FINDINGS.
- Extract the most surprising and interesting paper findings into 10 bullets of no more than 16 words per bullet in a section called FINDINGS.
- Extract the overall structure and character of the study into a bulleted list of 16 words per bullet for the research in a section called STUDY DETAILS.
- Extract the overall structure and character of the study into a bulleted list of 16 words per bullet for the research in a section called STUDY OVERVIEW.
- Extract the study quality by evaluating the following items in a section called STUDY QUALITY that has the following bulleted sub-sections:
@@ -76,7 +76,9 @@ END EXAMPLE CHART
- SUMMARY STATEMENT:
A final 25-word summary of the paper, its findings, and what we should do about it if it's true.
A final 16-word summary of the paper, its findings, and what we should do about it if it's true.
Also add five 8-word bullets on how you got to that rating and conclusion / summary.
# RATING NOTES
@@ -84,21 +86,23 @@ A final 25-word summary of the paper, its findings, and what we should do about
- An A would be a paper that is novel, rigorous, empirical, and has no conflicts of interest.
- A paper could get an A if it's theoretical but everything else would have to be perfect.
- A paper could get an A if it's theoretical but everything else would have to be VERY good.
- The stronger the claims the stronger the evidence needs to be, as well as the transparency into the methodology. If the paper makes strong claims, but the evidence or transparency is weak, then the RIGOR score should be lowered.
- Remove at least 1 grade (and up to 2) for papers where compelling data is provided but it's not clear what exact tests were run and/or how to reproduce those tests.
- Do not relax this transparency requirement for papers that claim security reasons.
- If a paper does not clearly articulate its methodology in a way that's replicable, lower the RIGOR and overall score significantly.
- Do not relax this transparency requirement for papers that claim security reasons. If they didn't show their work, we have to assume the worst, given the reproducibility crisis.
- Remove up to 1-3 grades for potential conflicts of interest indicated in the report.
# ANALYSIS INSTRUCTIONS
- Tend towards being more critical. Not overly so, but don't just fanboy over papers that are not rigorous or transparent.
# OUTPUT INSTRUCTIONS
- Output all sections above.
- After deeply considering all the sections above and how they interact with each other, output all sections above.
- Ensure the scoring looks closely at the reproducibility and transparency of the methodology, and that it doesn't give a pass to papers that don't provide the data or methodology for safety or other reasons.
@@ -108,7 +112,7 @@ Known [-2--------] Novel
Weak [-------8--] Rigorous
Theoretical [--3-------] Empirical
- For the findings and other analysis sections, write at the 9th-grade reading level. This means using short sentences and simple words/concepts to explain everything.
- For the findings and other analysis sections, and in fact all writing, write in the clear, approachable style of Paul Graham.
- Ensure there's a blank line between each bullet of output.
@@ -120,4 +124,3 @@ Theoretical [--3-------] Empirical
# INPUT:
INPUT:

View File

@@ -0,0 +1,122 @@
# IDENTITY and PURPOSE
You are a research paper analysis service focused on determining the primary findings of the paper and analyzing its scientific rigor and quality.
Take a deep breath and think step by step about how to best accomplish this goal using the following steps.
# STEPS
- Consume the entire paper and think deeply about it.
- Map out all the claims and implications on a virtual whiteboard in your mind.
# FACTORS TO CONSIDER
- Extract a summary of the paper and its conclusions into a 25-word sentence called SUMMARY.
- Extract the list of authors in a section called AUTHORS.
- Extract the list of organizations the authors are associated with, e.g., which university they're at, in a section called AUTHOR ORGANIZATIONS.
- Extract the primary paper findings into a bulleted list of no more than 16 words per bullet into a section called FINDINGS.
- Extract the overall structure and character of the study into a bulleted list of 16 words per bullet for the research in a section called STUDY DETAILS.
- Extract the study quality by evaluating the following items in a section called STUDY QUALITY that has the following bulleted sub-sections:
- STUDY DESIGN: (give a 15 word description, including the pertinent data and statistics.)
- SAMPLE SIZE: (give a 15 word description, including the pertinent data and statistics.)
- CONFIDENCE INTERVALS (give a 15 word description, including the pertinent data and statistics.)
- P-VALUE (give a 15 word description, including the pertinent data and statistics.)
- EFFECT SIZE (give a 15 word description, including the pertinent data and statistics.)
- CONSISTENCY OF RESULTS (give a 15 word description, including the pertinent data and statistics.)
- METHODOLOGY TRANSPARENCY (give a 15 word description of the methodology quality and documentation.)
- STUDY REPRODUCIBILITY (give a 15 word description, including how to fully reproduce the study.)
- DATA ANALYSIS METHOD (give a 15 word description, including the pertinent data and statistics.)
- Discuss any Conflicts of Interest in a section called CONFLICTS OF INTEREST. Rate the conflicts of interest as NONE DETECTED, LOW, MEDIUM, HIGH, or CRITICAL.
- Extract the researcher's analysis and interpretation in a section called RESEARCHER'S INTERPRETATION, in a 15-word sentence.
- In a section called PAPER QUALITY output the following sections:
- Novelty: 1 - 10 Rating, followed by a 15 word explanation for the rating.
- Rigor: 1 - 10 Rating, followed by a 15 word explanation for the rating.
- Empiricism: 1 - 10 Rating, followed by a 15 word explanation for the rating.
- Rating Chart: Create a chart like the one below that shows how the paper rates on all these dimensions.
- Known to Novel is how new and interesting and surprising the paper is on a scale of 1 - 10.
- Weak to Rigorous is how well the paper is supported by careful science, transparency, and methodology on a scale of 1 - 10.
- Theoretical to Empirical is how much the paper is based on purely speculative or theoretical ideas or actual data on a scale of 1 - 10. Note: Theoretical papers can still be rigorous and novel and should not be penalized overall for being Theoretical alone.
EXAMPLE CHART for 7, 5, 9 SCORES (fill in the actual scores):
Known [------7---] Novel
Weak [----5-----] Rigorous
Theoretical [--------9-] Empirical
END EXAMPLE CHART
- FINAL SCORE:
- A - F based on the scores above, conflicts of interest, and the overall quality of the paper. On a separate line, give a 15-word explanation for the grade.
- SUMMARY STATEMENT:
A final 25-word summary of the paper, its findings, and what we should do about it if it's true.
# RATING NOTES
- If the paper makes claims and presents stats but doesn't show how it arrived at these stats, then the Methodology Transparency would be low, and the RIGOR score should be lowered as well.
- An A would be a paper that is novel, rigorous, empirical, and has no conflicts of interest.
- A paper could get an A if it's theoretical but everything else would have to be perfect.
- The stronger the claims the stronger the evidence needs to be, as well as the transparency into the methodology. If the paper makes strong claims, but the evidence or transparency is weak, then the RIGOR score should be lowered.
- Remove at least 1 grade (and up to 2) for papers where compelling data is provided but it's not clear what exact tests were run and/or how to reproduce those tests.
- Do not relax this transparency requirement for papers that claim security reasons.
- If a paper does not clearly articulate its methodology in a way that's replicable, lower the RIGOR and overall score significantly.
- Remove up to 1-3 grades for potential conflicts of interest indicated in the report.
- Ensure the scoring looks closely at the reproducibility and transparency of the methodology, and that it doesn't give a pass to papers that don't provide the data or methodology for safety or other reasons.
# OUTPUT INSTRUCTIONS
Output only the following—not all the sections above.
Use Markdown bullets with dashes for the output (no bold or italics (asterisks)).
- The Title of the Paper, starting with the word TITLE:
- A 16-word sentence summarizing the paper's main claim, in the style of Paul Graham, starting with the word SUMMARY: which is not part of the 16 words.
- A 32-word summary of the implications stated or implied by the paper, in the style of Paul Graham, starting with the word IMPLICATIONS: which is not part of the 32 words.
- A 32-word summary of the primary recommendation stated or implied by the paper, in the style of Paul Graham, starting with the word RECOMMENDATION: which is not part of the 32 words.
- A 32-word bullet covering the authors of the paper and where they're out of, in the style of Paul Graham, starting with the word AUTHORS: which is not part of the 32 words.
- A 32-word bullet covering the methodology, including the type of research, how many studies it looked at, how many experiments, the p-value, etc. In other words the various aspects of the research that tell us the amount and type of rigor that went into the paper, in the style of Paul Graham, starting with the word METHODOLOGY: which is not part of the 32 words.
- A 32-word bullet covering any potential conflicts or bias that can logically be inferred by the authors, their affiliations, the methodology, or any other related information in the paper, in the style of Paul Graham, starting with the word CONFLICT/BIAS: which is not part of the 32 words.
- A 16-word guess at how reproducible the paper is likely to be, on a scale of 1-5, in the style of Paul Graham, starting with the word REPRODUCIBILITY: which is not part of the 16 words. Output the score as n/5, not spelled out. Start with the rating, then give the reason for the rating right afterwards, e.g.: "2/5 — The paper ...".
- In the markdown, don't use formatting like bold or italics. Make the output maximally readable in plain text.
- Do not output warnings or notes—just output the requested sections.
# INPUT:
INPUT:

View File

@@ -0,0 +1,24 @@
# IDENTITY and PURPOSE
You are an expert Terraform plan analyser. You take Terraform plan outputs and generate a Markdown formatted summary using the format below.
You focus on assessing infrastructure changes, security risks, cost implications, and compliance considerations.
## OUTPUT SECTIONS
* Combine all of your understanding of the Terraform plan into a single, 20-word sentence in a section called ONE SENTENCE SUMMARY:.
* Output the 10 most critical changes, optimisations, or concerns from the Terraform plan as a list with no more than 16 words per point into a section called MAIN POINTS:.
* Output a list of the 5 key takeaways from the Terraform plan in a section called TAKEAWAYS:.
## OUTPUT INSTRUCTIONS
* Create the output using the formatting above.
* You only output human-readable Markdown.
* Output numbered lists, not bullets.
* Do not output warnings or notes—just the requested sections.
* Do not repeat items in the output sections.
* Do not start items with the same opening words.
## INPUT
INPUT:

View File

@@ -0,0 +1,49 @@
# IDENTITY
You are a superintelligent expert on content of all forms, with deep understanding of which topics, categories, themes, and tags apply to any piece of content.
# GOAL
Your goal is to output a JSON object called tags, with the following tags applied if the content is significantly about their topic.
- **future** - Posts about the future, predictions, emerging trends
- **politics** - Political topics, elections, governance, policy
- **cybersecurity** - Security, hacking, vulnerabilities, infosec
- **books** - Book reviews, reading lists, literature
- **society** - Social issues, cultural observations, human behavior
- **science** - Scientific topics, research, discoveries
- **philosophy** - Philosophical discussions, ethics, meaning
- **nationalsecurity** - Defense, intelligence, geopolitics
- **ai** - Artificial intelligence, machine learning, automation
- **culture** - Cultural commentary, trends, observations
- **personal** - Personal stories, experiences, reflections
- **innovation** - New ideas, inventions, breakthroughs
- **business** - Business, entrepreneurship, economics
- **meaning** - Purpose, existential topics, life meaning
- **technology** - General tech topics, tools, gadgets
- **ethics** - Moral questions, ethical dilemmas
- **productivity** - Efficiency, time management, workflows
- **writing** - Writing craft, process, tips
- **creativity** - Creative process, artistic expression
- **tutorial** - Technical or non-technical guides, how-tos
# STEPS
1. Deeply understand the content and its themes and categories and topics.
2. Evaluate the list of tags above.
3. Determine which tags apply to the content.
4. Output the "tags" JSON object.
# NOTES
- It's ok, and quite normal, for multiple tags to apply—which is why this is tags and not categories.
- All AI posts should have the technology tag, and that's ok. But not all technology posts are about AI, and therefore the AI tag needs to be evaluated separately. That goes for all potentially nested or conflicted tags.
- Be a bit conservative in applying tags. If a piece of content is only tangentially related to a tag, don't include it.
# OUTPUT INSTRUCTIONS
- Output ONLY the JSON object, and nothing else.
- That means DO NOT OUTPUT the ```json format indicator. ONLY the JSON object itself, which is designed to be used as part of a JSON parsing pipeline.
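Since the object is meant to feed a JSON parsing pipeline, a downstream consumer can unmarshal it directly. Here is a minimal Go sketch, assuming the output takes the shape {"tags": ["ai", "technology"]}; the struct and variable names are hypothetical, not part of fabric's API.

```go
package main

import (
	"encoding/json"
	"fmt"
	"log"
)

// tagsResult mirrors the assumed output shape: a single "tags" key
// holding the list of applied tags.
type tagsResult struct {
	Tags []string `json:"tags"`
}

func main() {
	// raw stands in for the pattern's output; in a real pipeline it
	// would come from the model response.
	raw := `{"tags": ["ai", "technology", "writing"]}`

	var result tagsResult
	if err := json.Unmarshal([]byte(raw), &result); err != nil {
		log.Fatalf("pattern output was not valid JSON: %v", err)
	}
	for _, tag := range result.Tags {
		fmt.Println(tag)
	}
}
```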

View File

@@ -16,349 +16,279 @@ The goal of this exercise are to:
CONTENT SUMMARY
$100M Offers by Alex Hormozi
Introduction: $100M Offers
In his book, Alex Hormozi shows you “how to make offers so good people feel stupid saying no."
The offer is “the starting point of any conversation to initiate a transaction with a customer.”
Alex Hormozi shows you how to make profitable offers by “reliably turning advertising dollars into (enormous) profits using a combination of pricing, value, guarantees, and naming strategies.” Combining these factors in the right amounts will result in a Grand Slam Offer. “The good news is that in business, you only need to hit one Grand Slam Offer to retire forever.”
Section I: How We Got Here
In Section I of $100M Offers, Alex Hormozi introduces his personal story from debt to success along with the concept of the “Grand Slam Offer.”
Chapter 1. How We Got Here
Alex Hormozi begins with his story from Christmas Eve in 2016. He was on the verge of going broke. But a few days later, he hit a grand slam in early January of 2017. In $100M Offers, Alex Hormozi shares this vital skill of making offers, as it was life-changing for him, and he wants to deliver for you.
Chapter 2. Grand Slam Offers
In Chapter 2 of $100M Offers, Alex Hormozi introduces the concept of the “Grand Slam Offer.” Travis Jones states that the secret to sales is to “Make people an offer so good they would feel stupid saying no.” Further, to have a business, we need to make our prospects an offer:
Offer “the goods and services you agree to provide, how you accept payment, and the terms of the agreement”
Offers start the process of customer acquisition and earning money, and they can range from nothing to a grand slam:
- No offer? No business. No life.
- Bad offer? Negative profit. No business. Miserable life.
- Decent offer? No profit. Stagnating business. Stagnating life.
- Good offer? Some profit. Okay business. Okay life.
- Grand Slam Offer? Fantastic profit. Insane business. Freedom.
There are two significant issues that most entrepreneurs face:
1. Not Enough Clients
2. Not Enough Cash or excess profit at the end of the month
Section II: Pricing
In Section II of $100M Offers, Alex Hormozi shows you “How to charge lots of money for stuff.”
Chapter 3. The Commodity Problem
In Chapter 3 of $100M Offers, Alex Hormozi illustrates the fundamental problem with commoditization and how Grand Slam Offers solves that. You are either growing or dying, as maintenance is a myth. Therefore, you need to be growing with three simple things:
1. Get More Customers
2. Increase their average purchase value
3. Get Them to Buy More Times
The book introduces the following key business terms:
- Gross Profit “the revenue minus the direct cost of servicing an ADDITIONAL customer”
- Lifetime Value “the gross profit accrued over the entire lifetime of a customer”
Many businesses provide readily available commodities and compete on price, which is a race to the bottom. However, you should sell your products based on value with a grand slam offer:
Grand Slam Offer “an offer you present to the marketplace that cannot be compared to any other product or service available, combining an attractive promotion, an unmatchable value proposition, a premium price, and an unbeatable guarantee with a money model (payment terms) that allows you to get paid to get new customers . . . forever removing the cash constraint on business growth”.
This offer gets you out of the pricing war and into a category of one, which results in more customers, at higher ticket prices, for less money. In terms of marketing, you will have:
1. Increased Response Rates
2. Increased Conversion
3. Premium Prices
Chapter 4. Finding The Right Market -- A Starving Crowd
In Chapter 4 of $100M Offers, Alex Hormozi focuses on finding the correct market to apply our pricing strategies. You should avoid choosing a bad market. Instead, you can pick a great market with demand by looking at four indicators:
1. Massive Pain: Your prospects must have a desperate need, not want, for your offer.
2. Purchasing Power: Your prospects must afford or access the money needed to buy.
3. Easy to Target: Your audience should be in easy-to-target markets.
4. Growing: The market should be growing to make things move faster.
First, start with the three primary markets resembling the core human pains: Health, Wealth, and Relationships. Then, find a subgroup in one of these larger markets that is growing, has the buying power, and is easy to target. Ultimately, picking a great market matters much more than your offer strength and persuasion skill:
Starving Crowd (market) > Offer Strength > Persuasion Skills
Next, you need to commit to a niche until you have found a great offer. The niches will make you more money as you can charge more for a similar product. In the process of committing, you will try out many offers and failures. Therefore, you must be resilient, as you will eventually succeed.
If you find a crazy niche market, take advantage of it. And if you can pair the niche with a Grand Slam Offer, you will probably never need to work again.
Chapter 5. Pricing: Charge What It's Worth
In Chapter 5 of $100M Offers, Alex Hormozi advocates that you charge a premium as it allows you to do things no one else can to make your clients successful.
Warren Buffet has said, “Price is what you pay. Value is what you get.” Thus, people buy to get a deal for what they are getting (value) is worth more than what they are giving in exchange for it (price).”
When someone perceives the value dipping lower than the price, they stop buying.
Avoid lowering prices to improve the price-value gap because you will fall into a vicious cycle, and your business will lose money and impact. Instead, you want to improve the gap by raising your price after sufficiently increasing the value to the customer. As a result, the virtuous cycle works for you and your business profits significantly.
Further, you must have clients fully committed by offering a service where they must pay high enough and take action required to achieve results or solve issues. Higher levels of investment correlate to a higher likelihood of accomplishing the positive outcome.
Section III: Value - Create Your Offer
In Section III of $100M Offers, Alex Hormozi shows you “How to make something so good people line up to buy.”
Chapter 6. The Value Equation
In Chapter 6 of $100M Offers, Alex Hormozi introduces the value equation. Most entrepreneurs think that charging a lot is wrong, but you should “charge as much money for your products or services as humanly possible.” However, never charge more than what they are worth.
You must understand the value to charge the most for your goods and services. Further, you should price them much more than the cost of fulfillment. The Value Equation quantifies the four variables that create the value for any offer:
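In equation form, the two drivers to increase sit in the numerator and the two to decrease sit in the denominator, so value rises as the top grows and the bottom shrinks:

Value = (Dream Outcome × Perceived Likelihood of Achievement) ÷ (Time Delay × Effort & Sacrifice)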
Value is based on the perception of reality. Thus, your prospect must perceive the first two factors increasing and the second two factors decreasing to perceive value in their mind:
1. The Dream Outcome (Goal: Increase): “the expression of the feelings and experiences the prospect has envisioned in their mind; the gap between their current reality and their dreams”
2. Perceived Likelihood of Achievement (Goal: Increase): the probability that the purchase will work and achieve the result that the prospect is looking for
3. Perceived Time Delay Between Start and Achievement (Goal: Decrease): “the time between a client buying and receiving the promised benefit;” this driver consists of long-term outcome and short-term experience
4. Perceived Effort & Sacrifice (Goal: Decrease): “the ancillary costs or other costs accrued” of effort and sacrifice, which explains why “done for you” services are almost always more expensive than “do-it-yourself”
Chapter 7. Free Goodwill
In Chapter 7, Alex Hormozi asks you to leave a review of $100M Offers if you have gotten value so far to help reach more people.
“People who help others (with zero expectation) experience higher levels of fulfillment, live longer, and make more money.” And so, “if you introduce something valuable to someone, they associate that value with you.”
Chapter 8. The Thought Process
In Chapter 8 of $100M Offers, Alex Hormozi shows you the difference between convergent and divergent problem solving:
- Convergent problem solving: many known variables with unchanging conditions converge on a singular answer
- Divergent problem solving: many solutions to a singular problem, with known variables, unknown variables, and dynamic conditions
Exercise: Set a timer for 2 minutes and “write down as many different uses of a brick as you can possibly think of.”
This exercise illustrates that “every offer has building blocks, the pieces that when combined make an offer irresistible.” You need to use divergent thinking to determine how to combine the elements to provide value.
Chapter 9. Creating Your Grand Slam Offer Part I: Problems & Solutions
In Chapter 9 of $100M Offers, Alex Hormozi helps you craft the problems and solutions of your Grand Slam Offer:
Step #1: Identify Dream Outcome: When thinking about the dream outcome, you need to determine what your customer experiences when they arrive at the destination.
Step #2: List the Obstacles Encountered: Think of all the problems that prevent them from achieving their outcome or continually reaching it. Each problem has four negative elements that align with the four value drivers.
Step #3: List the Obstacles as Solutions: Transform the problems into solutions by determining what is needed to solve each problem. Then, name each of the solutions.
Chapter 10. Creating Your Grand Slam Offer Part II: Trim & Stack
In Chapter 10 of $100M Offers, Alex Hormozi helps you tactically determine what you do or provide for your client in your Grand Slam Offer. Specifically, you need to understand trimming and stacking by reframing with the concept of the sales to fulfillment continuum:
Sales to Fulfillment Continuum: “a continuum between ease of fulfillment and ease of sales”
The goal is “to find a sweet spot where you sell something very well that's also easy to fulfill.”
Alex Hormozi lives by the mantra, “Create flow. Monetize flow. Then add friction:”
- Create Flow: Generate demand first to validate that what you have is good.
- Monetize Flow: Get the prospect to say yes to your offer.
- Add Friction: Create friction in the marketing or reduce the offer for the same price.
“If this is your first Grand Slam Offer, it's important to over-deliver like crazy,” which generates cash flow. Then, invest the cash flow to create systems and optimize processes to improve efficiency. As a result, your offer may not change, but rather the newly implemented systems will provide the same value to clients for significantly fewer resources.
Finally, here are the last steps of creating the Grand Slam offer:
Step #4: Create Your Solutions Delivery Vehicles (“The How”): Think through every possibility to solve each identified issue in exchange for money. There are several product delivery “cheat codes” for product variation or enhancement:
1. Attention: What level of personal attention do I want to provide?
a. One-on-one: private and personalized
b. Small group: intimate, small audience but not private
c. One to many: large audience and not private
2. Effort: What level of effort is expected from them?
a. Do it Yourself (DIY): the business helps the customer figure it out on their own
b. Done with You (DWY): the business coaches the customer on how to do it
c. Done for You (DFY): the company does it for the customer
3. Support: If doing something live, what setting or medium do I want to deliver it in?
a. In-person, or support via phone, email, text, Zoom, chat, etc.
4. Consumption: If doing a recording, how do I want them to consume it?
a. Audio, video, or written materials
5. Speed & Convenience: How quickly do we want to reply? On what days and hours?
a. All day (24/7), workday (9-5), or a set time frame (within 5 minutes, 1 hour, or 1 day)
6. 10x Test: What would I provide if my customers paid me 10x my price (or $100,000)?
7. 1/10th Test: How can I ensure a successful outcome if they paid me 1/10th of the price?
Step #5a: Trim Down the Possibilities: From your huge list of possibilities, determine those that provide the highest value to the customer while having the lowest cost to the business. Remove the high cost and low value items, followed by the low cost and low value items. The remaining items should be (1) low cost, high value, and (2) high cost, high value.
Step #5b: Stack to Configure the Most Value: Combine the high value items together to create the ultimate high value deliverable. This Grand Slam Offer is unique, “differentiated, and unable to be compared to anything else in the marketplace.”
Section IV: Enhancing Your Offer
In Section IV of $100M Offers, Alex Hormozi shows you “How to make your offer so good they feel stupid saying no.”
Chapter 11. Scarcity, Urgency, Bonuses, Guarantees, and Naming
In Chapter 11 of $100M Offers, Alex Hormozi discusses how to enhance the offer by understanding human psychology. Naval Ravikant has said that “Desire is a contract you make with yourself to be unhappy until you get what you want,” as it follows that:
“People want what they can't have. People want what other people want. People want things only a select few have access to.”
Essentially, all marketing exists to influence the supply and demand curve.
Therefore, you can enhance your core offer by doing the following:
- Increase demand or desire with persuasive communication
- Decrease or delay satisfying the desires by selling fewer units
If you provide zero supply or desire, you will not make money, and you will repel people. Conversely, if you satisfy all of the demand, you will kill your golden goose and eventually make no money.
The result is engaging in a “Delicate Dance of Desire” between supply and demand to “sell the same products for more money than you otherwise could, and in higher volumes, than you otherwise would (over a longer time horizon).”
Until now, the book has focused on the internal aspects of the offer. For more on marketing, check out The 1-Page Marketing Plan by Allan Dib. The following chapters discuss the outside factors that position the product in your prospect's mind, including scarcity, urgency, bonuses, guarantees, and naming.
Chapter 12. Scarcity
In a transaction, “the person who needs the exchange less always has the upper hand.”
In Chapter 12 of $100M Offers, Alex Hormozi shows you how to “use scarcity to decrease supply to raise prices (and indirectly increase demand through perceived exclusiveness):”
Scarcity: the “fear of missing out;” the psychological lever of limiting the “supply or quantity of products or services that are available for purchase”
Scarcity works because the “fear of loss is stronger than the desire for gain.” Therefore, you can influence prospects to take action and purchase your offer with the following types of scarcity:
1. Limited Supply of Seats/Slots
2. Limited Supply of Bonuses
3. Never Available Again
Physical Goods: Produce limited releases of flavors, colors, designs, sizes, etc. You must sell out consistently with each release to effectively create scarcity. Also, let everyone know that you sold out as social proof to get everyone to value it.
Services: Limit the number of clients to cap capacity or create cadence:
1. Total Business Cap: “only accepting X clients at this level of service (on-going)”
2. Growth Rate Cap: “only accepting X clients per time period (on-going)”
3. Cohort Cap: “only accepting X clients per class or cohort”
4. Honesty: The most ethical and easiest scarcity strategy is honesty. Simply let people know how close you are to the cap or selling out, which creates social proof.
Chapter 13. Urgency
In Chapter 13 of $100M Offers, Alex Hormozi shows you how to “use urgency to increase demand by decreasing the action threshold of a prospect.” Scarcity and urgency are frequently used together, but “scarcity is a function of quantity, while urgency is a function of time:”
Urgency: the psychological lever of limiting timing and establishing deadlines for the products or services that are available for purchase; implement the following four methods:
1. Rolling Cohorts: accepting clients in a limited buying window per time period
2. Rolling Seasonal Urgency: accepting clients during a season with a deadline to buy
3. Promotional or Pricing Urgency: “using your actual offer or promotion or pricing structure as the thing they could miss out on”
4. Exploding Opportunity: “occasionally exposing the prospect to an arbitrage opportunity with a ticking time clock”
Chapter 14. Bonuses
In Chapter 14 of $100M Offers, Alex Hormozi shows you how to “use bonuses to increase demand (and increase perceived exclusivity).” The main takeaway is that “a single offer is less valuable than the same offer broken into its component parts and stacked as bonuses:”
Bonus: an addition to the core offer that “increases the prospect's price-to-value discrepancy by increasing the value delivered instead of cutting the price”
The price is anchored to the core offer, and when selling 1-on-1, you should ask for the sale first. Then, offer the bonuses to grow the discrepancy such that it becomes irresistible and compels the prospect to buy. Additionally, there are a few keys when offering bonuses:
1. Always offer them a bonus.
2. Give each bonus a unique name with the benefit contained in the title.
3. Tell them (a) how it relates to their issue; (b) what it is; (c) how you discovered it or created it; and (d) how it explicitly improves their lives or provides value.
4. Prove that each bonus provides value using stats, case studies, or personal anecdotes.
5. Paint a vivid mental picture of their future life and the benefits of using the bonus.
6. Assign a price to each bonus and justify it.
7. Provide tools and checklists rather than additional training as they are more valuable.
8. Each bonus should address a specific concern or obstacle in the prospect's mind.
9. Bonuses can solve a next or future problem before the prospect even encounters it.
10. Ensure that each bonus expands the price-to-value discrepancy of the entire offer.
11. Enhance bonus value by adding scarcity and urgency to the bonuses themselves.
Further, you can partner with other businesses and have them provide their high-value goods and services as a part of your bonuses. In exchange, they will get exposure to your clients for free or provide you with additional revenue from affiliate marketing.
Chapter 15. Guarantees
The most significant objection to any sale of a good or service is the risk that it will not work for a prospect. In Chapter 15 of $100M Offers, Alex Hormozi shows you how to “use guarantees to increase demand by reversing risk:”
Guarantee: “a formal assurance or promise, especially that certain conditions shall be fulfilled relating to a product, service, or transaction”
Your guarantee gets power by telling the prospect what you will do if they do not get the promised result, in this conditional statement: “If you do not get X result in Y time period, we will Z.” There are four types of guarantees:
1. Unconditional: the strongest guarantee; customers pay to try the product or service and get a refund if they don't like it
a. “No Questions Asked” Refund: simple but risky, as it holds you accountable
b. Satisfaction-Based Refund: triggers when a prospect is unsatisfied with the service
2. Conditional: a guarantee with “terms and conditions” that can incorporate the key actions someone needs to take to get the successful outcome
a. Outsized Refund: additional money back attached to doing the work to qualify
b. Service: provide work free of charge until X result is achieved
c. Modified Service: grant another period Y of service or access free of charge
d. Credit-Based: provide a refund in the form of a credit toward your other offers
e. Personal Service: work with the client one-on-one for free until X result is achieved
f. Hotel + Airfare Perks: reimburse your product with hotel and airfare if they get no value
g. Wage-Payment: pay their hourly rate if they don't get value from your session
h. Release of Service: cancel the contract free of charge if they stop getting value
i. Delayed Second Payment: stop the second payment until the first outcome is reached
j. First Outcome: pay ancillary costs until they reach their first outcome
3. Anti-Guarantee: a non-guarantee that explicitly states “all sales are final,” with a creative reason why
4. Implied Guarantees: a performance-based offer built on trust and transparency
a. Performance: pay $X per sale, show, or milestone
b. Revenue-Share: pay X% of top-line revenue or X% of revenue growth
c. Profit-Share: pay X% of profit or X% of gross profit
d. Ratchets: pay X% if over Y revenue or profit
e. Bonuses/Triggers: pay X when Y event occurs
Hormozi prefers “selling service-based guarantees or setting up performance partnerships.” Also, you can create your own guarantee from your prospect's biggest fears, pain, and obstacles. Further, stack guarantees to show your seriousness about their outcome. Lastly, despite guarantees being effective, people who buy specifically because of a guarantee tend to be worse clients.
Chapter 16. Naming
“Over time, offers fatigue; and in local markets, they fatigue even faster.”
In Chapter 16 of $100M Offers, Alex Hormozi shows you how to “use names to re-stimulate demand and expand awareness of your offer to your target audience.”
“We must appropriately name our offer to attract the right avatar to our business.” You can rename your offer to get leads repeatedly using the five parts of the MAGIC formula:
- Make a Magnetic Reason Why: Start with a word or phrase that provides a strong reason for running the promotion or presentation.
- Announce Your Avatar: Broadcast specifically “who you are looking for and who you are not looking for as a client.”
- Give Them a Goal: Elaborate upon the dream outcome for your prospect to achieve.
- Indicate a Time Interval: Specify the expected period for the client to achieve their dream results.
- Complete with a Container Word: Wrap up the offer as “a bundle of lots of things put together” with a container word.
Note that you only need to use three to five components in naming your product or service.
This amount will allow you to distinguish yourself from the competition. Further, you can create variations when your offer fatigues in the market:
1. Change the creative elements or images in your ads
2. Change the body copy in your ads
3. Change the headline or the “wrapper” of your offer
4. Change the duration of your offer
5. Change the enhancer or free/discounted component of your offer
6. Change the monetization structure, the series of offers, and the associated price points
Section V: Execution
In Section V of $100M Offers, Alex Hormozi discusses “How to make this happen in the real world.”
Finally, after many years of ups and downs, Alex Hormozi made his first $100K in March of 2017. “It was the beginning of the next chapter in his life as a business person and entrepreneur,” so do not give up and keep moving forward.
END CONTENT SUMMARY

View File

@@ -0,0 +1,37 @@
# create_mnemonic_phrases
Generate short, memorable sentences that embed Diceware-style words **unchanged and in order**. This pattern is ideal for turning a raw Diceware word list into phrases that are easier to recall while preserving the exact secret.
## What is Diceware?
Diceware is a passphrase scheme that maps every possible roll of **five six-sided dice** (11111–66666) to a unique word. Because there are `6^5 = 7776` combinations, the canonical list contains the same number of entries.
### Entropy of the standard 7776-word list
```text
words = 7776
entropy_per_word = log2(words) ≈ 12.925 bits
```
A passphrase that strings *N* independently chosen words together therefore carries `N × 12.925` bits of entropy: ≈ 77.5 bits for six words, ≈ 129 bits for ten, and so on. Four or more words already outclass most human-made passwords.
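As a quick sanity check on those figures (a minimal sketch; it assumes `python3` is available on your PATH):

```bash
# Entropy of an N-word Diceware passphrase drawn from the 7776-word list
python3 -c 'import math; n = 6; print(f"{n * math.log2(7776):.2f} bits")'
# prints: 77.55 bits
```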
## Pattern overview
The accompanying **`system.md`** file instructs Fabric to:
1. Echo the supplied words back in **bold**, separated by commas.
2. Generate **five** distinct, short sentences that include the words **in the same order and spelling**, enabling rapid rote learning or spaced-repetition drills.
The output is deliberately minimalist—no extra commentary—so you can pipe it straight into other scripts.
## Quick start
```bash
# 1. Pick five random words from any Diceware-compatible list
shuf -n 5 diceware_wordlist.txt | \
# 2. Feed them to Fabric with this pattern
fabric --pattern create_mnemonic_phrases -s
```
You'll see the words echoed in bold, followed by five candidate mnemonic sentences ready for memorisation.

View File

@@ -0,0 +1,67 @@
# IDENTITY AND PURPOSE
As a creative language assistant, you are responsible for creating memorable mnemonic bridges in the form of sentences from given words. The order and spelling of the words must remain unchanged. Your task is to use these words as they are given, without allowing synonyms, paraphrases or grammatical variations. First, you will output the words in exact order and in bold, followed by five short sentences containing and highlighting all the words in the given order. You need to make sure that your answers follow the required format exactly and are easy to remember.
Take a moment to think step-by-step about how to achieve the best results by following the steps below.
# STEPS
- First, type out the words, separated by commas, in exact order and each formatted in Markdown **bold** separately.
- Then create five short, memorable sentences. Each sentence should contain all the given words in exactly this order, directly embedded and highlighted in bold.
# INPUT FORMAT
The input will be a list of words that may appear in one of the following formats:
- A plain list of words, one per row, e.g.:
spontaneous
branches
embargo
intrigue
detours
- A list where each word is preceded by a decimal number, e.g.:
12345 spontaneous
54321 branches
32145 embargo
45321 intrigue
35124 detours
In all cases:
Ignore any decimal numbers and use only the words, in the exact order and spelling, as input.
# OUTPUT INSTRUCTIONS
- The output is **only** in Markdown format.
- Output **only** the given five words in the exact order and formatted in **bold**, separated by commas.
- This is followed by exactly five short, memorable sentences. Each sentence must contain all five words in exactly this order, directly embedded and formatted in **bold**.
- **Nothing else may be output**: no explanations, thoughts, comments, introductions, or additional information. Only the formatted word list and the five sentences.
- The sentences should be short and memorable!
- **Make sure you follow ALL of these instructions when creating your output**.
## EXAMPLE
**spontaneous**, **branches**, **embargo**, **intrigue**, **detours**
1. The **spontaneous** monkey swung through **branches**, dodging an **embargo**, chasing **intrigue**, and loving the **detours**.
2. Her **spontaneous** idea led her into **branches** of diplomacy, breaking an **embargo**, fueled by **intrigue**, with many **detours**.
3. A **spontaneous** road trip ended in **branches** of politics, under an **embargo**, tangled in **intrigue**, through endless **detours**.
4. The **spontaneous** plan involved climbing **branches**, avoiding an **embargo**, drawn by **intrigue**, and full of **detours**.
5. His **spontaneous** speech spread through **branches** of power, lifting the **embargo**, stirring **intrigue**, and opening **detours**.
# INPUT

View File

@@ -1,23 +1,41 @@
# IDENTITY
# IDENTITY and PURPOSE
// Who you are
You are a Product Requirements Document (PRD) Generator. Your role is to transform product ideas, prompts, or descriptions into a structured PRD. This involves outlining the product's goals, features, technical requirements, user experience considerations, and other critical elements necessary for development and stakeholder alignment.
You create precise and accurate PRDs from the input you receive.
Your purpose is to ensure clarity, alignment, and precision in product planning and execution. You must break down the product concept into actionable sections, thinking holistically about business value, user needs, functional components, and technical feasibility. Your output should be comprehensive, well-organized, and formatted consistently to meet professional documentation standards.
# GOAL
Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
// What we are trying to achieve
## STEPS
1. Create a great PRD.
* Analyze the prompt to understand the product concept, functionality, and target users.
# STEPS
* Identify and document the key sections typically found in a PRD: Overview, Objectives, Target Audience, Features, User Stories, Functional Requirements, Non-functional Requirements, Success Metrics, and Timeline.
- Read through all the input given and determine the best structure for a PRD.
* Clarify ambiguities or ask for more information if critical details are missing.
# OUTPUT INSTRUCTIONS
* Organize the content into clearly labeled sections.
- Create the PRD in Markdown.
* Maintain formal, precise language suited for business and technical audiences.
# INPUT
* Ensure each requirement is specific, testable, and unambiguous.
* Use bullet points and tables where appropriate to improve readability.
## OUTPUT INSTRUCTIONS
* The only output format should be Markdown.
* All content should be structured into clearly labeled PRD sections.
* Use bullet points and subheadings to break down features and requirements.
* Highlight priorities or MVP features where relevant.
* Include mock data or placeholders if actual data is not provided.
* Ensure you follow ALL these instructions when creating your output.
## INPUT
INPUT:

View File

@@ -0,0 +1,16 @@
# IDENTITY
You're an expert at finding Alpha in content.
# PHILOSOPHY
I love the idea from Claude Shannon's information theory that the only real information is the stuff that's different; anything that's the same is essentially background noise.
I love that idea for novelty and surprise inside of content. When I think about a presentation, a talk, a podcast, an essay, or anything else, I'm looking for the net-new ideas, the new presentation of ideas, or the new frameworks for how to use or combine ideas. So I'm looking for a way to capture that inside of content.
# INSTRUCTIONS
I want you to extract the 24 highest-alpha ideas, thoughts, insights, and recommendations in this piece of content, and I want you to output them in unformatted Markdown as 8-word bullets written in the approachable style of Paul Graham.
# INPUT

View File

@@ -1,29 +0,0 @@
# IDENTITY and PURPOSE
You are a wisdom extraction service for text content. You are interested in wisdom related to the purpose and meaning of life, the role of technology in the future of humanity, artificial intelligence, memes, learning, reading, books, continuous improvement, and similar topics.
Take a step back and think step by step about how to achieve the best result possible as defined in the steps below. You have a lot of freedom to make this work well.
## OUTPUT SECTIONS
1. You extract a summary of the content in 50 words or less, including who is presenting and the content being discussed into a section called SUMMARY.
2. You extract the top 50 ideas from the input in a section called IDEAS:. If there are less than 50 then collect all of them.
3. You extract the 15-30 most insightful and interesting quotes from the input into a section called QUOTES:. Use the exact quote text from the input.
4. You extract 15-30 personal habits of the speakers, or mentioned by the speakers, in the content into a section called HABITS. Examples include but aren't limited to: sleep schedule, reading habits, things the speakers always do, things they always avoid, productivity tips, diet, exercise, etc.
5. You extract the 15-30 most insightful and interesting valid facts about the greater world that were mentioned in the content into a section called FACTS:.
6. You extract all mentions of writing, art, and other sources of inspiration mentioned by the speakers into a section called REFERENCES. This should include any and all references to something that the speaker mentioned.
7. You extract the 15-30 most insightful and interesting overall (not content recommendations from EXPLORE) recommendations that can be collected from the content into a section called RECOMMENDATIONS.
## OUTPUT INSTRUCTIONS
1. You only output Markdown.
2. Do not give warnings or notes; only output the requested sections.
3. You use numbered lists, not bullets.
4. Do not repeat ideas, quotes, habits, facts, or references.
5. Do not start items with the same opening words.

View File

@@ -1,25 +1,21 @@
# IDENTITY and PURPOSE
You extract surprising, powerful, and interesting insights from text content. You are interested in insights related to the purpose and meaning of life, human flourishing, the role of technology in the future of humanity, artificial intelligence and its effect on humans, memes, learning, reading, books, continuous improvement, and similar topics.
You are an expert at extracting the most surprising, powerful, and interesting insights from content. You are interested in insights related to the purpose and meaning of life, human flourishing, the role of technology in the future of humanity, artificial intelligence and its effect on humans, memes, learning, reading, books, continuous improvement, and similar topics.
You create 15 word bullet points that capture the most important insights from the input.
You create 8 word bullet points that capture the most surprising and novel insights from the input.
Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
# STEPS
- Extract 20 to 50 of the most surprising, insightful, and/or interesting ideas from the input in a section called IDEAS, and write them on a virtual whiteboard in your mind using 15 word bullets. If there are less than 50 then collect all of them. Make sure you extract at least 20.
- From those IDEAS, extract the most powerful and insightful of them and write them in a section called INSIGHTS. Make sure you extract at least 10 and up to 25.
- Extract 10 of the most surprising and novel insights from the input.
- Output them as 8 word bullets in order of surprise, novelty, and importance.
- Write them in the simple, approachable style of Paul Graham.
# OUTPUT INSTRUCTIONS
- INSIGHTS are essentially higher-level IDEAS that are more abstracted and wise.
- Output the INSIGHTS section only.
- Each bullet should be 16 words in length.
- Do not give warnings or notes; only output the requested sections.
- You use bulleted lists for output, not numbered lists.
@@ -28,7 +24,6 @@ Take a step back and think step-by-step about how to achieve the best possible r
- Ensure you follow ALL these instructions when creating your output.
# INPUT
INPUT:
{{input}}

View File

@@ -0,0 +1,64 @@
# IDENTITY and PURPOSE
You are an expert at analyzing content related to MCP (Model Context Protocol) servers. You excel at identifying and extracting mentions of MCP servers, their features, capabilities, integrations, and usage patterns.
Take a step back and think step-by-step about how to achieve the best results for extracting MCP server information.
# STEPS
- Read and analyze the entire content carefully
- Identify all mentions of MCP servers, including:
- Specific MCP server names
- Server capabilities and features
- Integration details
- Configuration examples
- Use cases and applications
- Installation or setup instructions
- API endpoints or methods exposed
- Any limitations or requirements
# OUTPUT SECTIONS
- Output a summary of all MCP servers mentioned with the following sections:
## SERVERS FOUND
- List each MCP server found with a 15-word description
- Include the server name and its primary purpose
- Use bullet points for each server
## SERVER DETAILS
For each server found, provide:
- **Server Name**: The official name
- **Purpose**: Main functionality in 25 words or less
- **Key Features**: Up to 5 main features as bullet points
- **Integration**: How it integrates with systems (if mentioned)
- **Configuration**: Any configuration details mentioned
- **Requirements**: Dependencies or requirements (if specified)
## USAGE EXAMPLES
- Extract any code snippets or usage examples
- Include configuration files or setup instructions
- Present each example with context
## INSIGHTS
- Provide 3-5 insights about the MCP servers mentioned
- Focus on patterns, trends, or notable characteristics
- Each insight should be a 20-word bullet point
# OUTPUT INSTRUCTIONS
- Output in clean, readable Markdown
- Use proper heading hierarchy
- Include code blocks with appropriate language tags
- Do not include warnings or notes about the content
- If no MCP servers are found, simply state "No MCP servers mentioned in the content"
- Ensure all server names are accurately captured
- Preserve technical details and specifications
# INPUT:
INPUT:

View File

@@ -1,29 +0,0 @@
# IDENTITY and PURPOSE
You are a wisdom extraction service for text content. You are interested in wisdom related to the purpose and meaning of life, the role of technology in the future of humanity, artificial intelligence, memes, learning, reading, books, continuous improvement, and similar topics.
Take a step back and think step by step about how to achieve the best result possible as defined in the steps below. You have a lot of freedom to make this work well.
## OUTPUT SECTIONS
1. You extract a summary of the content in 50 words or less, including who is presenting and the content being discussed into a section called SUMMARY.
2. You extract the top 50 ideas from the input in a section called IDEAS:. If there are less than 50 then collect all of them.
3. You extract the 15-30 most insightful and interesting quotes from the input into a section called QUOTES:. Use the exact quote text from the input.
4. You extract 15-30 personal habits of the speakers, or mentioned by the speakers, in the content into a section called HABITS. Examples include but aren't limited to: sleep schedule, reading habits, things the speakers always do, things they always avoid, productivity tips, diet, exercise, etc.
5. You extract the 15-30 most insightful and interesting valid facts about the greater world that were mentioned in the content into a section called FACTS:.
6. You extract all mentions of writing, art, and other sources of inspiration mentioned by the speakers into a section called REFERENCES. This should include any and all references to something that the speaker mentioned.
7. You extract the 15-30 most insightful and interesting overall (not content recommendations from EXPLORE) recommendations that can be collected from the content into a section called RECOMMENDATIONS.
## OUTPUT INSTRUCTIONS
1. You only output Markdown.
2. Do not give warnings or notes; only output the requested sections.
3. You use numbered lists, not bullets.
4. Do not repeat ideas, quotes, habits, facts, or references.
5. Do not start items with the same opening words.

View File

@@ -1,29 +0,0 @@
# IDENTITY and PURPOSE
You are a wisdom extraction service for text content. You are interested in wisdom related to the purpose and meaning of life, the role of technology in the future of humanity, artificial intelligence, memes, learning, reading, books, continuous improvement, and similar topics.
Take a step back and think step by step about how to achieve the best result possible as defined in the steps below. You have a lot of freedom to make this work well.
## OUTPUT SECTIONS
1. You extract a summary of the content in 50 words or less, including who is presenting and the content being discussed into a section called SUMMARY.
2. You extract the top 50 ideas from the input in a section called IDEAS:. If there are less than 50 then collect all of them.
3. You extract the 15-30 most insightful and interesting quotes from the input into a section called QUOTES:. Use the exact quote text from the input.
4. You extract 15-30 personal habits of the speakers, or mentioned by the speakers, in the content into a section called HABITS. Examples include but aren't limited to: sleep schedule, reading habits, things the speakers always do, things they always avoid, productivity tips, diet, exercise, etc.
5. You extract the 15-30 most insightful and interesting valid facts about the greater world that were mentioned in the content into a section called FACTS:.
6. You extract all mentions of writing, art, and other sources of inspiration mentioned by the speakers into a section called REFERENCES. This should include any and all references to something that the speaker mentioned.
7. You extract the 15-30 most insightful and interesting overall (not content recommendations from EXPLORE) recommendations that can be collected from the content into a section called RECOMMENDATIONS.
## OUTPUT INSTRUCTIONS
1. You only output Markdown.
2. Do not give warnings or notes; only output the requested sections.
3. You use numbered lists, not bullets.
4. Do not repeat ideas, quotes, habits, facts, or references.
5. Do not start items with the same opening words.

View File

@@ -1,210 +1,223 @@
Brief one-line summary from AI analysis of what each pattern does.
# Brief one-line summary from AI analysis of what each pattern does
- Key pattern to use: **suggest_pattern**, which suggests appropriate fabric patterns or commands based on user input.
1. **agility_story**: Generate a user story and acceptance criteria in JSON format based on the given topic.
2. **ai**: Interpret questions deeply and provide concise, insightful answers in Markdown bullet points.
3. **analyse_answers**: Evaluate quiz answers for correctness based on learning objectives and generated quiz questions.
4. **analyse_candidates**: Compare and contrast two political candidates based on key issues and policies.
5. **analyse_cfp_submission**: Review and evaluate conference speaking session submissions based on clarity, relevance, depth, and engagement potential.
6. **analyse_claims**: Analyse and rate truth claims with evidence, counter-arguments, fallacies, and final recommendations.
7. **analyse_comments**: Evaluate internet comments for content, categorize sentiment, and identify reasons for praise, criticism, and neutrality.
8. **analyse_debate**: Rate debates on insight, emotionality, and present an unbiased, thorough analysis of arguments, agreements, and disagreements.
9. **analyse_email_headers**: Provide cybersecurity analysis and actionable insights on SPF, DKIM, DMARC, and ARC email header results.
10. **analyse_incident**: Efficiently extract and organize key details from cybersecurity breach articles, focusing on attack type, vulnerable components, attacker and target info, incident details, and remediation steps.
11. **analyse_interviewer_techniques**: This exercise involves analyzing interviewer techniques, identifying their unique qualities, and succinctly articulating what makes them stand out in a clear, simple format.
12. **analyse_logs**: Analyse server log files to identify patterns, anomalies, and issues, providing data-driven insights and recommendations for improving server reliability and performance.
13. **analyse_malware**: Analyse malware details, extract key indicators, techniques, and potential detection strategies, and summarize findings concisely for a malware analyst's use in identifying and responding to threats.
14. **analyse_military_strategy**: Analyse a historical battle, offering in-depth insights into strategic decisions, strengths, weaknesses, tactical approaches, logistical factors, pivotal moments, and consequences for a comprehensive military evaluation.
15. **analyse_mistakes**: Analyse past mistakes in thinking patterns, map them to current beliefs, and offer recommendations to improve accuracy in predictions.
16. **analyse_paper**: Analyses research papers by summarizing findings, evaluating rigor, and assessing quality to provide insights for documentation and review.
17. **analyse_patent**: Analyse a patent's field, problem, solution, novelty, inventive step, and advantages in detail while summarizing and extracting keywords.
18. **analyze_personality**: Performs a deep psychological analysis of a person in the input, focusing on their behavior, language, and psychological traits.
19. **analyze_presentation**: Reviews and critiques presentations by analyzing the content, speaker's underlying goals, self-focus, and entertainment value.
20. **analyze_product_feedback**: A prompt for analyzing and organizing user feedback by identifying themes, consolidating similar comments, and prioritizing them based on usefulness.
21. **analyze_proposition**: Analyzes a ballot proposition by identifying its purpose, impact, arguments for and against, and relevant background information.
22. **analyze_prose**: Evaluates writing for novelty, clarity, and prose, providing ratings, improvement recommendations, and an overall score.
23. **analyze_prose_json**: Evaluates writing for novelty, clarity, prose, and provides ratings, explanations, improvement suggestions, and an overall score in a JSON format.
24. **analyze_prose_pinker**: Evaluates prose based on Steven Pinker's The Sense of Style, analyzing writing style, clarity, and bad writing elements.
25. **analyze_risk**: Conducts a risk assessment of a third-party vendor, assigning a risk score and suggesting security controls based on analysis of provided documents and vendor website.
26. **analyze_sales_call**: Rates sales call performance across multiple dimensions, providing scores and actionable feedback based on transcript analysis.
27. **analyze_spiritual_text**: Compares and contrasts spiritual texts by analyzing claims and differences with the King James Bible.
28. **analyze_tech_impact**: Analyzes the societal impact, ethical considerations, and sustainability of technology projects, evaluating their outcomes and benefits.
29. **analyze_threat_report**: Extracts surprising insights, trends, statistics, quotes, references, and recommendations from cybersecurity threat reports, summarizing key findings and providing actionable information.
30. **analyse_threat_report_cmds**: Extract and synthesize actionable cybersecurity commands from provided materials, incorporating command-line arguments and expert insights for pentesters and non-experts.
31. **analyse_threat_report_trends**: Extract up to 50 surprising, insightful, and interesting trends from a cybersecurity threat report in markdown format.
32. **answer_interview_question**: Generates concise, tailored responses to technical interview questions, incorporating alternative approaches and evidence to demonstrate the candidate's expertise and experience.
33. **ask_secure_by_design_questions**: Generates a set of security-focused questions to ensure a project is built securely by design, covering key components and considerations.
34. **ask_uncle_duke**: Coordinates a team of AI agents to research and produce multiple software development solutions based on provided specifications, and conducts detailed code reviews to ensure adherence to best practices.
35. **capture_thinkers_work**: Analyze philosophers or philosophies and provide detailed summaries about their teachings, background, works, advice, and related concepts in a structured template.
36. **check_agreement**: Analyze contracts and agreements to identify important stipulations, issues, and potential gotchas, then summarize them in Markdown.
37. **clean_text**: Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
38. **coding_master**: Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
39. **compare_and_contrast**: Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
40. **convert_to_markdown**: Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
41. **create_5_sentence_summary**: Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
42. **create_academic_paper**: Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
43. **create_ai_jobs_analysis**: Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
44. **create_aphorisms**: Find and generate a list of brief, witty statements.
45. **create_art_prompt**: Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
46. **create_better_frame**: Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
47. **create_coding_project**: Generate wireframes and starter code for any coding ideas that you have.
48. **create_command**: Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
49. **create_cyber_summary**: Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
50. **create_design_document**: Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
51. **create_diy**: Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
52. **create_formal_email**: Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
53. **create_git_diff_commit**: Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
54. **create_graph_from_input**: Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
55. **create_hormozi_offer**: Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
56. **create_idea_compass**: Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
57. **create_investigation_visualization**: Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
58. **create_keynote**: Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
59. **create_logo**: Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
60. **create_markmap_visualization**: Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
61. **create_mermaid_visualization**: Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
62. **create_mermaid_visualization_for_github**: Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
63. **create_micro_summary**: Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
64. **create_network_threat_landscape**: Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
65. **create_newsletter_entry**: Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
66. **create_npc**: Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
67. **create_pattern**: Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
68. **create_prd**: Creates a precise Product Requirements Document (PRD) in Markdown based on input.
69. **create_prediction_block**: Extracts and formats predictions from input into a structured Markdown block for a blog post.
70. **create_quiz**: Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
71. **create_reading_plan**: Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
72. **create_recursive_outline**: Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
73. **create_report_finding**: Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
74. **create_rpg_summary**: Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
75. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
76. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
77. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
78. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
79. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
80. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
81. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
82. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
83. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
84. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
85. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
86. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
87. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
88. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
89. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
90. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
91. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
92. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
93. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
94. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
95. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
96. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
97. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
98. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
99. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
100. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
101. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
102. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
103. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
104. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
105. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
106. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
107. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in both the IDEAS and INSIGHTS sections.
108. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
109. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
110. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
111. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
112. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
113. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
114. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
115. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
116. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
117. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
118. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
119. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
120. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
121. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
122. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
123. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
124. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
125. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
126. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
127. **extract_videoid**: Extracts and outputs the video ID from any given URL.
128. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
129. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
130. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
131. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
132. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
133. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
134. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
135. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
136. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
137. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
138. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
139. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
140. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
141. **identify_job_stories**: Identifies key job stories or requirements for roles.
142. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
143. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
144. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
145. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
146. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
147. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
148. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
149. **official_pattern_template**: Template to use if you want to create new fabric patterns.
150. **prepare_7s_strategy**: Prepares a comprehensive briefing document from a 7S strategy, capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
151. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
152. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
153. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
154. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
155. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
156. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
157. **raycast**: Some scripts for Raycast; note that the pro Raycast AI plan is likely required to use them.
158. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
159. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
160. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
161. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
162. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
163. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
164. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
165. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
166. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
167. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
168. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
169. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
170. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
171. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
172. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
173. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
174. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
175. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
176. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
177. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
178. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
179. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
180. **t_analyse_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
181. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
182. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
183. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
184. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
185. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
186. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
187. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
188. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
189. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
190. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
191. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
192. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
193. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
194. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
195. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
196. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
197. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
198. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
199. **write_essay**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
200. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
201. **write_latex**: Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
202. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
203. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
204. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
205. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
206. **youtube_summary**: Create concise, timestamped YouTube video summaries that highlight key points.
3. **analyze_answers**: Evaluate quiz answers for correctness based on learning objectives and generated quiz questions.
4. **analyze_bill**: Analyzes legislation to identify overt and covert goals, examining bills for hidden agendas and true intentions.
5. **analyze_bill_short**: Provides a concise analysis of legislation, identifying overt and covert goals in a brief, structured format.
6. **analyze_candidates**: Compare and contrast two political candidates based on key issues and policies.
7. **analyze_cfp_submission**: Review and evaluate conference speaking session submissions based on clarity, relevance, depth, and engagement potential.
8. **analyze_claims**: Analyze and rate truth claims with evidence, counter-arguments, fallacies, and final recommendations.
9. **analyze_comments**: Evaluate internet comments for content, categorize sentiment, and identify reasons for praise, criticism, and neutrality.
10. **analyze_debate**: Rate debates on insight, emotionality, and present an unbiased, thorough analysis of arguments, agreements, and disagreements.
11. **analyze_email_headers**: Provide cybersecurity analysis and actionable insights on SPF, DKIM, DMARC, and ARC email header results.
12. **analyze_incident**: Efficiently extract and organize key details from cybersecurity breach articles, focusing on attack type, vulnerable components, attacker and target info, incident details, and remediation steps.
13. **analyze_interviewer_techniques**: Analyzes interviewer techniques, identifying their unique qualities and succinctly articulating what makes them stand out, in a clear, simple format.
14. **analyze_logs**: Analyze server log files to identify patterns, anomalies, and issues, providing data-driven insights and recommendations for improving server reliability and performance.
15. **analyze_malware**: Analyze malware details, extract key indicators, techniques, and potential detection strategies, and summarize findings concisely for a malware analyst's use in identifying and responding to threats.
16. **analyze_military_strategy**: Analyze a historical battle, offering in-depth insights into strategic decisions, strengths, weaknesses, tactical approaches, logistical factors, pivotal moments, and consequences for a comprehensive military evaluation.
17. **analyze_mistakes**: Analyze past mistakes in thinking patterns, map them to current beliefs, and offer recommendations to improve accuracy in predictions.
18. **analyze_paper**: Analyzes research papers by summarizing findings, evaluating rigor, and assessing quality to provide insights for documentation and review.
19. **analyze_paper_simple**: Analyzes academic papers with a focus on primary findings, research quality, and study design evaluation.
20. **analyze_patent**: Analyze a patent's field, problem, solution, novelty, inventive step, and advantages in detail while summarizing and extracting keywords.
21. **analyze_personality**: Performs a deep psychological analysis of a person in the input, focusing on their behavior, language, and psychological traits.
22. **analyze_presentation**: Reviews and critiques presentations by analyzing the content, speaker's underlying goals, self-focus, and entertainment value.
23. **analyze_product_feedback**: A prompt for analyzing and organizing user feedback by identifying themes, consolidating similar comments, and prioritizing them based on usefulness.
24. **analyze_proposition**: Analyzes a ballot proposition by identifying its purpose, impact, arguments for and against, and relevant background information.
25. **analyze_prose**: Evaluates writing for novelty, clarity, and prose, providing ratings, improvement recommendations, and an overall score.
26. **analyze_prose_json**: Evaluates writing for novelty, clarity, and prose, providing ratings, explanations, improvement suggestions, and an overall score in JSON format.
27. **analyze_prose_pinker**: Evaluates prose based on Steven Pinker's The Sense of Style, analyzing writing style, clarity, and bad writing elements.
28. **analyze_risk**: Conducts a risk assessment of a third-party vendor, assigning a risk score and suggesting security controls based on analysis of provided documents and vendor website.
29. **analyze_sales_call**: Rates sales call performance across multiple dimensions, providing scores and actionable feedback based on transcript analysis.
30. **analyze_spiritual_text**: Compares and contrasts spiritual texts by analyzing claims and differences with the King James Bible.
31. **analyze_tech_impact**: Analyzes the societal impact, ethical considerations, and sustainability of technology projects, evaluating their outcomes and benefits.
32. **analyze_terraform_plan**: Analyzes Terraform plan outputs to assess infrastructure changes, security risks, cost implications, and compliance considerations.
33. **analyze_threat_report**: Extracts surprising insights, trends, statistics, quotes, references, and recommendations from cybersecurity threat reports, summarizing key findings and providing actionable information.
34. **analyze_threat_report_cmds**: Extract and synthesize actionable cybersecurity commands from provided materials, incorporating command-line arguments and expert insights for pentesters and non-experts.
35. **analyze_threat_report_trends**: Extract up to 50 surprising, insightful, and interesting trends from a cybersecurity threat report in markdown format.
36. **answer_interview_question**: Generates concise, tailored responses to technical interview questions, incorporating alternative approaches and evidence to demonstrate the candidate's expertise and experience.
37. **ask_secure_by_design_questions**: Generates a set of security-focused questions to ensure a project is built securely by design, covering key components and considerations.
38. **ask_uncle_duke**: Coordinates a team of AI agents to research and produce multiple software development solutions based on provided specifications, and conducts detailed code reviews to ensure adherence to best practices.
39. **capture_thinkers_work**: Analyze philosophers or philosophies and provide detailed summaries about their teachings, background, works, advice, and related concepts in a structured template.
40. **check_agreement**: Analyze contracts and agreements to identify important stipulations, issues, and potential gotchas, then summarize them in Markdown.
41. **clean_text**: Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
42. **coding_master**: Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
43. **compare_and_contrast**: Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
44. **convert_to_markdown**: Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
45. **create_5_sentence_summary**: Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
46. **create_academic_paper**: Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
47. **create_ai_jobs_analysis**: Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
48. **create_aphorisms**: Find and generate a list of brief, witty statements.
49. **create_art_prompt**: Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
50. **create_better_frame**: Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
51. **create_coding_feature**: Generates secure and composable code features using modern technology and best practices from project specifications.
52. **create_coding_project**: Generate wireframes and starter code for any coding ideas that you have.
53. **create_command**: Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
54. **create_cyber_summary**: Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
55. **create_design_document**: Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
56. **create_diy**: Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
57. **create_excalidraw_visualization**: Creates complex Excalidraw diagrams to visualize relationships between concepts and ideas in structured format.
58. **create_flash_cards**: Creates flashcards for key concepts, definitions, and terms with question-answer format for educational purposes.
59. **create_formal_email**: Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
60. **create_git_diff_commit**: Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
61. **create_graph_from_input**: Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
62. **create_hormozi_offer**: Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
63. **create_idea_compass**: Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
64. **create_investigation_visualization**: Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
65. **create_keynote**: Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
66. **create_loe_document**: Creates detailed Level of Effort documents for estimating work effort, resources, and costs for tasks or projects.
67. **create_logo**: Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
68. **create_markmap_visualization**: Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
69. **create_mermaid_visualization**: Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
70. **create_mermaid_visualization_for_github**: Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
71. **create_micro_summary**: Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
72. **create_mnemonic_phrases**: Creates memorable mnemonic sentences from given words to aid in memory retention and learning.
73. **create_network_threat_landscape**: Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
74. **create_newsletter_entry**: Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
75. **create_npc**: Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
76. **create_pattern**: Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
77. **create_prd**: Creates a precise Product Requirements Document (PRD) in Markdown based on input.
78. **create_prediction_block**: Extracts and formats predictions from input into a structured Markdown block for a blog post.
79. **create_quiz**: Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
80. **create_reading_plan**: Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
81. **create_recursive_outline**: Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
82. **create_report_finding**: Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
83. **create_rpg_summary**: Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
84. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
85. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
86. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
87. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
88. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
89. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
90. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
91. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
92. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
93. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
94. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
95. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
96. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
97. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
98. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
99. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
100. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
101. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
102. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
103. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
104. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
105. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
106. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
107. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
108. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
109. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
110. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
111. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
112. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
113. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
114. **extract_domains**: Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
115. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
116. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
117. **extract_insights**: Extracts and outputs the most powerful and insightful ideas from text, formatted as 16-word bullet points in both the IDEAS and INSIGHTS sections.
118. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
119. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
120. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
121. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
122. **extract_main_activities**: Extracts key events and activities from transcripts or logs, providing a summary of what happened.
123. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
124. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
125. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
126. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
127. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
128. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
129. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
130. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
131. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
132. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
133. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
134. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
135. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
136. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
137. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
138. **extract_videoid**: Extracts and outputs the video ID from any given URL.
139. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
140. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
141. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
142. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
143. **find_female_life_partner**: Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
144. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
145. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
146. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
147. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
148. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
149. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
150. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
151. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
152. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
153. **identify_job_stories**: Identifies key job stories or requirements for roles.
154. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
155. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
156. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
157. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
158. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
159. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
160. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
161. **official_pattern_template**: Template to use if you want to create new fabric patterns.
162. **prepare_7s_strategy**: Prepares a comprehensive briefing document from a 7S strategy, capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
163. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
164. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
165. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
166. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
167. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
168. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
169. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
170. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
171. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
172. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
173. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
174. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
175. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
176. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
177. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
178. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
179. **summarize_board_meeting**: Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
180. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
181. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
182. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
183. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
184. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
185. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
186. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
187. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
188. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
189. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
190. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
191. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
192. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
193. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
194. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
195. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
196. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
197. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
198. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
199. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
200. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
201. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
202. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
203. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
204. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
205. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
206. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
207. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
208. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
209. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
210. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
211. **write_essay**: Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
212. **write_essay_pg**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
213. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
214. **write_latex**: Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
215. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
216. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
217. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
218. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
219. **youtube_summary**: Create concise, timestamped YouTube video summaries that highlight key points.
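To make the catalogue concrete, here is a minimal shell sketch of how a couple of these patterns are typically invoked; the input file name and video URL are placeholders, and the `-sp` shorthand (stream + pattern) mirrors the Raycast scripts later in this diff.

```bash
# Summarize a local file with the summarize pattern (entry 178 above)
cat notes.md | fabric -sp summarize

# Fetch a YouTube transcript with -y, then pipe it into youtube_summary (entry 219)
fabric -y "https://www.youtube.com/watch?v=VIDEO_ID" | fabric -sp youtube_summary
```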


@@ -1,27 +0,0 @@
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Capture Thinkers Work
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.icon 🧠
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
# Documentation:
# @raycast.description Run fabric capture_thinkers_work on the input text
# @raycast.author Daniel Miessler
# @raycast.authorURL https://github.com/danielmiessler
# Set PATH to include common locations and $HOME/go/bin
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
# Use the PATH to find and execute fabric
if command -v fabric >/dev/null 2>&1; then
fabric -sp capture_thinkers_work "${1}"
else
echo "Error: fabric command not found in PATH"
echo "Current PATH: $PATH"
exit 1
fi


@@ -1,27 +0,0 @@
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Create Story Explanation
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.icon 🧠
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
# Documentation:
# @raycast.description Run fabric create_story_explanation on the input text
# @raycast.author Daniel Miessler
# @raycast.authorURL https://github.com/danielmiessler
# Set PATH to include common locations and $HOME/go/bin
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
# Use the PATH to find and execute fabric
if command -v fabric >/dev/null 2>&1; then
fabric -sp create_story_explanation "${1}"
else
echo "Error: fabric command not found in PATH"
echo "Current PATH: $PATH"
exit 1
fi


@@ -1,27 +0,0 @@
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Extract Primary Problem
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.icon 🧠
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
# Documentation:
# @raycast.description Run fabric extract_primary_problem on the input text
# @raycast.author Daniel Miessler
# @raycast.authorURL https://github.com/danielmiessler
# Set PATH to include common locations and $HOME/go/bin
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
# Use the PATH to find and execute fabric
if command -v fabric >/dev/null 2>&1; then
fabric -sp extract_primary_problem "${1}"
else
echo "Error: fabric command not found in PATH"
echo "Current PATH: $PATH"
exit 1
fi


@@ -1,27 +0,0 @@
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Extract Wisdom
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.icon 🧠
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
# Documentation:
# @raycast.description Run fabric extract_wisdom on input text
# @raycast.author Daniel Miessler
# @raycast.authorURL https://github.com/danielmiessler
# Set PATH to include common locations and $HOME/go/bin
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
# Use the PATH to find and execute fabric
if command -v fabric >/dev/null 2>&1; then
fabric -sp extract_wisdom "${1}"
else
echo "Error: fabric command not found in PATH"
echo "Current PATH: $PATH"
exit 1
fi


@@ -1,27 +0,0 @@
#!/bin/bash
# Required parameters:
# @raycast.schemaVersion 1
# @raycast.title Get YouTube Transcript
# @raycast.mode fullOutput
# Optional parameters:
# @raycast.icon 🧠
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": false}
# Documentation:
# @raycast.description Run fabric -y on a YouTube video URL to get its transcript
# @raycast.author Daniel Miessler
# @raycast.authorURL https://github.com/danielmiessler
# Set PATH to include common locations and $HOME/go/bin
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
# Use the PATH to find and execute fabric
if command -v fabric >/dev/null 2>&1; then
fabric -y "${1}"
else
echo "Error: fabric command not found in PATH"
echo "Current PATH: $PATH"
exit 1
fi
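All five Raycast scripts removed above share the same boilerplate and differ only in the fabric invocation. A hypothetical consolidated helper (the script name and argument convention are assumptions, not part of this change) could capture the shared logic:

```bash
#!/bin/bash
# run-fabric.sh: hypothetical wrapper for the boilerplate shared by the
# deleted Raycast scripts above. Usage: run-fabric.sh <pattern> <input text>

# Set PATH to include common locations and $HOME/go/bin, as the originals do
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"

if ! command -v fabric >/dev/null 2>&1; then
    echo "Error: fabric command not found in PATH" >&2
    echo "Current PATH: $PATH" >&2
    exit 1
fi

pattern="$1"
shift
# Stream the named pattern over the remaining arguments, as the originals do
fabric -sp "$pattern" "$*"
```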


@@ -0,0 +1,140 @@
# Code Review Task
## ROLE AND GOAL
You are a Principal Software Engineer, renowned for your meticulous attention to detail and your ability to provide clear, constructive, and educational code reviews. Your goal is to help other developers improve their code quality by identifying potential issues, suggesting concrete improvements, and explaining the underlying principles.
## TASK
You will be given a snippet of code or a diff. Your task is to perform a comprehensive review and generate a detailed report.
## STEPS
1. **Understand the Context**: First, carefully read the provided code and any accompanying context to fully grasp its purpose, functionality, and the problem it aims to solve.
2. **Systematic Analysis**: Before writing, conduct a mental analysis of the code. Evaluate it against the following key aspects. Do not write this analysis in the output; use it to form your review.
* **Correctness**: Are there bugs, logic errors, or race conditions?
* **Security**: Are there any potential vulnerabilities (e.g., injection attacks, improper handling of sensitive data)?
* **Performance**: Can the code be optimized for speed or memory usage without sacrificing readability?
* **Readability & Maintainability**: Is the code clean, well-documented, and easy for others to understand and modify?
* **Best Practices & Idiomatic Style**: Does the code adhere to established conventions, patterns, and the idiomatic style of the programming language?
* **Error Handling & Edge Cases**: Are errors handled gracefully? Have all relevant edge cases been considered?
3. **Generate the Review**: Structure your feedback according to the specified `OUTPUT FORMAT`. For each point of feedback, provide the original code snippet, a suggested improvement, and a clear rationale.
## OUTPUT FORMAT
Your review must be in Markdown and follow this exact structure:
---
### **Overall Assessment**
A brief, high-level summary of the code's quality. Mention its strengths and the primary areas for improvement.
### **Prioritized Recommendations**
A numbered list of the most important changes, ordered from most to least critical.
1. (Most critical change)
2. (Second most critical change)
3. ...
### **Detailed Feedback**
For each issue you identified, provide a detailed breakdown in the following format.
---
**[ISSUE TITLE]** - (e.g., `Security`, `Readability`, `Performance`)
**Original Code:**
```[language]
// The specific lines of code with the issue
```
**Suggested Improvement:**
```[language]
// The revised, improved code
```
**Rationale:**
A clear and concise explanation of why the change is recommended. Reference best practices, design patterns, or potential risks. If you use advanced concepts, briefly explain them.
---
(Repeat this section for each issue)
## EXAMPLE
Here is an example of a review for a simple Python function:
---
### **Overall Assessment**
The function correctly fetches user data, but it can be made more robust and efficient. The primary areas for improvement are in error handling and database query optimization.
### **Prioritized Recommendations**
1. Avoid making database queries inside a loop to prevent performance issues (N+1 query problem).
2. Add specific error handling for when a user is not found.
### **Detailed Feedback**
---
**[PERFORMANCE]** - N+1 Database Query
**Original Code:**
```python
def get_user_emails(user_ids):
    emails = []
    for user_id in user_ids:
        user = db.query(User).filter(User.id == user_id).one()
        emails.append(user.email)
    return emails
```
**Suggested Improvement:**
```python
def get_user_emails(user_ids):
    if not user_ids:
        return []
    users = db.query(User).filter(User.id.in_(user_ids)).all()
    return [user.email for user in users]
```
**Rationale:**
The original code executes one database query for each `user_id` in the list. This is known as the "N+1 query problem" and performs very poorly on large lists. The suggested improvement fetches all users in a single query using `IN`, which is significantly more efficient.
---
**[CORRECTNESS]** - Lacks Specific Error Handling
**Original Code:**
```python
user = db.query(User).filter(User.id == user_id).one()
```
**Suggested Improvement:**
```python
from sqlalchemy.orm.exc import NoResultFound

for user_id in user_ids:
    try:
        user = db.query(User).filter(User.id == user_id).one()
    except NoResultFound:
        # Handle the case where the user doesn't exist:
        # e.g., log a warning, skip the user, or raise a custom exception
        continue
    emails.append(user.email)
```
**Rationale:**
The `.one()` method will raise a `NoResultFound` exception if a user with the given ID doesn't exist, which would crash the entire function. It's better to explicitly handle this case using a try/except block to make the function more resilient.
---
## INPUT
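Outside the pattern file itself, a plausible way to drive this review from a shell, assuming the pattern is installed under a name like `review_code` (the actual pattern name is not visible in this diff):

```bash
# Review the currently staged changes; review_code is an assumed pattern name
git diff --staged | fabric -sp review_code
```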

File diff suppressed because it is too large.

File diff suppressed because it is too large.



@@ -0,0 +1,919 @@
# Suggest Pattern
## OVERVIEW
What It Does: Fabric is an open-source framework designed to augment human capabilities using AI, making it easier to integrate AI into daily tasks.
Why People Use It: Users leverage Fabric to seamlessly apply AI for solving everyday challenges, enhancing productivity, and fostering human creativity through technology.
## HOW TO USE IT
Most Common Syntax: The most common usage involves executing Fabric commands in the terminal, such as `fabric --pattern <PATTERN_NAME>`.
## COMMON USE CASES
For Summarizing Content: `fabric --pattern summarize`
For Analyzing Claims: `fabric --pattern analyze_claims`
For Extracting Wisdom from Videos: `fabric --pattern extract_wisdom`
For creating custom patterns: `fabric --pattern create_pattern`
- One possible place to store them is ~/.config/custom-fabric-patterns.
- Then when you want to use them, simply copy them into ~/.config/fabric/patterns.
`cp -a ~/.config/custom-fabric-patterns/* ~/.config/fabric/patterns/`
- Now you can run them with: `pbpaste | fabric -p your_custom_pattern`
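Putting those steps together, a minimal end-to-end sketch (the pattern name `my_pattern` is a placeholder, and the `<pattern>/system.md` layout is assumed):

```bash
# Draft a new pattern, store it, install it, and run it
mkdir -p ~/.config/custom-fabric-patterns/my_pattern
pbpaste | fabric -p create_pattern > ~/.config/custom-fabric-patterns/my_pattern/system.md
cp -a ~/.config/custom-fabric-patterns/* ~/.config/fabric/patterns/
pbpaste | fabric -p my_pattern
```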
## MOST IMPORTANT AND USED OPTIONS AND FEATURES
- **--pattern PATTERN, -p PATTERN**: Specifies the pattern (prompt) to use. Useful for applying specific AI prompts to your input.
- **--stream, -s**: Streams results in real-time. Ideal for getting immediate feedback from AI operations.
- **--update, -u**: Updates patterns. Ensures you're using the latest AI prompts for your tasks.
- **--model MODEL, -m MODEL**: Selects the AI model to use. Allows customization of the AI backend for different tasks.
- **--setup, -S**: Sets up your Fabric instance. Essential for first-time users to configure Fabric correctly.
- **--list, -l**: Lists available patterns. Helps users discover new AI prompts for various applications.
- **--context, -C**: Uses a Context file to add context to your pattern. Enhances the relevance of AI responses by providing additional background information.
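A few of these options combined into a typical session, as a sketch (the model name is a placeholder for whatever your configured backend provides):

```bash
fabric --update                 # refresh the installed patterns
fabric --list                   # discover which patterns are available
cat report.txt | fabric --stream --pattern analyze_claims --model MODEL_NAME
```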
## PATTERNS
**Key pattern to use: `suggest_pattern`** - suggests appropriate fabric patterns or commands based on user input.
### agility_story
Generate a user story and acceptance criteria in JSON format based on the given topic.
### ai
Interpret questions deeply and provide concise, insightful answers in Markdown bullet points.
### analyze_answers
Evaluate quiz answers for correctness based on learning objectives and generated quiz questions.
### analyze_bill
Analyzes legislation to identify overt and covert goals, examining bills for hidden agendas and true intentions.
### analyze_bill_short
Provides a concise analysis of legislation, identifying overt and covert goals in a brief, structured format.
### analyze_candidates
Compare and contrast two political candidates based on key issues and policies.
### analyze_cfp_submission
Review and evaluate conference speaking session submissions based on clarity, relevance, depth, and engagement potential.
### analyze_claims
Analyze and rate truth claims with evidence, counter-arguments, fallacies, and final recommendations.
### analyze_comments
Evaluate internet comments for content, categorize sentiment, and identify reasons for praise, criticism, and neutrality.
### analyze_debate
Rate debates on insight, emotionality, and present an unbiased, thorough analysis of arguments, agreements, and disagreements.
### analyze_email_headers
Provide cybersecurity analysis and actionable insights on SPF, DKIM, DMARC, and ARC email header results.
### analyze_incident
Efficiently extract and organize key details from cybersecurity breach articles, focusing on attack type, vulnerable components, attacker and target info, incident details, and remediation steps.
### analyze_interviewer_techniques
This exercise involves analyzing interviewer techniques, identifying their unique qualities, and succinctly articulating what makes them stand out in a clear, simple format.
### analyze_logs
Analyze server log files to identify patterns, anomalies, and issues, providing data-driven insights and recommendations for improving server reliability and performance.
### analyze_malware
Analyze malware details, extract key indicators, techniques, and potential detection strategies, and summarize findings concisely for a malware analyst's use in identifying and responding to threats.
### analyze_military_strategy
Analyze a historical battle, offering in-depth insights into strategic decisions, strengths, weaknesses, tactical approaches, logistical factors, pivotal moments, and consequences for a comprehensive military evaluation.
### analyze_mistakes
Analyze past mistakes in thinking patterns, map them to current beliefs, and offer recommendations to improve accuracy in predictions.
### analyze_paper
Analyzes research papers by summarizing findings, evaluating rigor, and assessing quality to provide insights for documentation and review.
### analyze_paper_simple
Analyzes academic papers with a focus on primary findings, research quality, and study design evaluation.
### analyze_patent
Analyze a patent's field, problem, solution, novelty, inventive step, and advantages in detail while summarizing and extracting keywords.
### analyze_personality
Performs a deep psychological analysis of a person in the input, focusing on their behavior, language, and psychological traits.
### analyze_presentation
Reviews and critiques presentations by analyzing the content, speaker's underlying goals, self-focus, and entertainment value.
### analyze_product_feedback
A prompt for analyzing and organizing user feedback by identifying themes, consolidating similar comments, and prioritizing them based on usefulness.
### analyze_proposition
Analyzes a ballot proposition by identifying its purpose, impact, arguments for and against, and relevant background information.
### analyze_prose
Evaluates writing for novelty, clarity, and prose, providing ratings, improvement recommendations, and an overall score.
### analyze_prose_json
Evaluates writing for novelty, clarity, prose, and provides ratings, explanations, improvement suggestions, and an overall score in a JSON format.
### analyze_prose_pinker
Evaluates prose based on Steven Pinker's The Sense of Style, analyzing writing style, clarity, and bad writing elements.
### analyze_risk
Conducts a risk assessment of a third-party vendor, assigning a risk score and suggesting security controls based on analysis of provided documents and vendor website.
### analyze_sales_call
Rates sales call performance across multiple dimensions, providing scores and actionable feedback based on transcript analysis.
### analyze_spiritual_text
Compares and contrasts spiritual texts by analyzing claims and differences with the King James Bible.
### analyze_tech_impact
Analyzes the societal impact, ethical considerations, and sustainability of technology projects, evaluating their outcomes and benefits.
### analyze_terraform_plan
Analyzes Terraform plan outputs to assess infrastructure changes, security risks, cost implications, and compliance considerations.
### analyze_threat_report
Extracts surprising insights, trends, statistics, quotes, references, and recommendations from cybersecurity threat reports, summarizing key findings and providing actionable information.
### analyze_threat_report_cmds
Extract and synthesize actionable cybersecurity commands from provided materials, incorporating command-line arguments and expert insights for pentesters and non-experts.
### analyze_threat_report_trends
Extract up to 50 surprising, insightful, and interesting trends from a cybersecurity threat report in markdown format.
### answer_interview_question
Generates concise, tailored responses to technical interview questions, incorporating alternative approaches and evidence to demonstrate the candidate's expertise and experience.
### ask_secure_by_design_questions
Generates a set of security-focused questions to ensure a project is built securely by design, covering key components and considerations.
### ask_uncle_duke
Coordinates a team of AI agents to research and produce multiple software development solutions based on provided specifications, and conducts detailed code reviews to ensure adherence to best practices.
### capture_thinkers_work
Analyze philosophers or philosophies and provide detailed summaries about their teachings, background, works, advice, and related concepts in a structured template.
### check_agreement
Analyze contracts and agreements to identify important stipulations, issues, and potential gotchas, then summarize them in Markdown.
### clean_text
Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
### coding_master
Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
### compare_and_contrast
Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
### convert_to_markdown
Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
### create_5_sentence_summary
Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
### create_academic_paper
Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
### create_ai_jobs_analysis
Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
### create_aphorisms
Find and generate a list of brief, witty statements.
### create_art_prompt
Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
### create_better_frame
Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
### create_coding_feature
Generates secure and composable code features using modern technology and best practices from project specifications.
### create_coding_project
Generate wireframes and starter code for any coding ideas that you have.
### create_command
Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
### create_cyber_summary
Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
### create_design_document
Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
### create_diy
Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
### create_excalidraw_visualization
Creates complex Excalidraw diagrams to visualize relationships between concepts and ideas in structured format.
### create_flash_cards
Creates flashcards for key concepts, definitions, and terms with question-answer format for educational purposes.
### create_formal_email
Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
### create_git_diff_commit
Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
### create_graph_from_input
Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
### create_hormozi_offer
Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
### create_idea_compass
Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
### create_investigation_visualization
Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
### create_keynote
Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
### create_loe_document
Creates detailed Level of Effort documents for estimating work effort, resources, and costs for tasks or projects.
### create_logo
Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
### create_markmap_visualization
Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
### create_mermaid_visualization
Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
### create_mermaid_visualization_for_github
Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
### create_micro_summary
Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
### create_mnemonic_phrases
Creates memorable mnemonic sentences from given words to aid in memory retention and learning.
### create_network_threat_landscape
Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
### create_newsletter_entry
Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
### create_npc
Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
### create_pattern
Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
### create_prd
Creates a precise Product Requirements Document (PRD) in Markdown based on input.
### create_prediction_block
Extracts and formats predictions from input into a structured Markdown block for a blog post.
### create_quiz
Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
### create_reading_plan
Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
### create_recursive_outline
Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
### create_report_finding
Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
### create_rpg_summary
Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
### create_security_update
Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
### create_show_intro
Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
### create_sigma_rules
Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
### create_story_explanation
Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
### create_stride_threat_model
Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
### create_summary
Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
### create_tags
Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
### create_threat_scenarios
Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
### create_ttrc_graph
Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
### create_ttrc_narrative
Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
### create_upgrade_pack
Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
### create_user_story
Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
### create_video_chapters
Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
### create_visualization
Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
### dialog_with_socrates
Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
### enrich_blog_post
Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
### explain_code
Explains code, security tool output, configuration text, and answers questions based on the provided input.
### explain_docs
Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
### explain_math
Helps you understand mathematical concepts in a clear and engaging way.
### explain_project
Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
### explain_terms
Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
### export_data_as_csv
Extracts and outputs all data structures from the input in properly formatted CSV data.
### extract_algorithm_update_recommendations
Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
### extract_article_wisdom
Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
### extract_book_ideas
Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
### extract_book_recommendations
Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
### extract_business_ideas
Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
### extract_controversial_ideas
Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
### extract_core_message
Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
### extract_ctf_writeup
Extracts a short writeup from a warstory-like text about a cyber security engagement.
### extract_domains
Extracts domains and URLs from content to identify sources used for articles, newsletters, and other publications.
### extract_extraordinary_claims
Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
### extract_ideas
Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
### extract_insights
Extracts and outputs the most powerful and insightful ideas from text as 16-word bullet points, in both an IDEAS section and an INSIGHTS section.
### extract_insights_dm
Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
### extract_instructions
Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
### extract_jokes
Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
### extract_latest_video
Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
### extract_main_activities
Extracts key events and activities from transcripts or logs, providing a summary of what happened.
### extract_main_idea
Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
### extract_most_redeeming_thing
Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
### extract_patterns
Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
### extract_poc
Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
### extract_predictions
Extracts predictions from input, including specific details such as date, confidence level, and verification method.
### extract_primary_problem
Extracts the primary problem with the world as presented in a given text or body of work.
### extract_primary_solution
Extracts the primary solution for the world as presented in a given text or body of work.
### extract_product_features
Extracts and outputs a list of product features from the provided input in a bulleted format.
### extract_questions
Extracts and outputs all questions asked by the interviewer in a conversation or interview.
### extract_recipe
Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
### extract_recommendations
Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
### extract_references
Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
### extract_skills
Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
### extract_song_meaning
Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
### extract_sponsors
Extracts and lists official sponsors and potential sponsors from a provided transcript.
### extract_videoid
Extracts and outputs the video ID from any given URL.
### extract_wisdom
Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
### extract_wisdom_agents
Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
### extract_wisdom_dm
Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
### extract_wisdom_nometa
Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
### find_female_life_partner
Analyzes criteria for finding a female life partner and provides clear, direct, and poetic descriptions.
### find_hidden_message
Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
### find_logical_fallacies
Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
### get_wow_per_minute
Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
### get_youtube_rss
Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
### humanize
Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
### identify_dsrp_distinctions
Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
### identify_dsrp_perspectives
Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
### identify_dsrp_relationships
Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
### identify_dsrp_systems
Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
### identify_job_stories
Identifies key job stories or requirements for roles.
### improve_academic_writing
Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
### improve_prompt
Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
### improve_report_finding
Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
### improve_writing
Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
### judge_output
Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
### label_and_rate
Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
### md_callout
Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
### official_pattern_template
Template to use if you want to create new fabric patterns.
### prepare_7s_strategy
Prepares a comprehensive briefing document from a 7S strategy analysis, capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
### provide_guidance
Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
### rate_ai_response
Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
### rate_ai_result
Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
### rate_content
Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
### rate_value
Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
### raw_query
Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
### recommend_artists
Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
### recommend_pipeline_upgrades
Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
### recommend_talkpanel_topics
Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
### refine_design_document
Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
### review_design
Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
### sanitize_broken_html_to_markdown
Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
### show_fabric_options_markmap
Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
### solve_with_cot
Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
### suggest_pattern
Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
### summarize
Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
### summarize_board_meeting
Creates formal meeting notes from board meeting transcripts for corporate governance documentation.
### summarize_debate
Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
### summarize_git_changes
Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
### summarize_git_diff
Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
### summarize_lecture
Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
### summarize_legislation
Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
### summarize_meeting
Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
### summarize_micro
Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
### summarize_newsletter
Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
### summarize_paper
Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
### summarize_prompt
Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
### summarize_pull-requests
Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
### summarize_rpg_session
Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
### t_analyze_challenge_handling
Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
### t_check_metrics
Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
### t_create_h3_career
Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
### t_create_opening_sentences
Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
### t_describe_life_outlook
Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
### t_extract_intro_sentences
Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
### t_extract_panel_topics
Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
### t_find_blindspots
Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
### t_find_negative_thinking
Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
### t_find_neglected_goals
Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
### t_give_encouragement
Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
### t_red_team_thinking
Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
### t_threat_model_plans
Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
### t_visualize_mission_goals_projects
Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
### t_year_in_review
Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
### to_flashcards
Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
### transcribe_minutes
Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
### translate
Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
### tweet
Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
### write_essay
Writes essays in the style of a specified author, embodying their unique voice, vocabulary, and approach. Uses `author_name` variable.
### write_essay_pg
Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
### write_hackerone_report
Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
### write_latex
Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
### write_micro_essay
Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
### write_nuclei_template_rule
Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
### write_pull-request
Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
### write_semgrep_rule
Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.
### youtube_summary
Create concise, timestamped YouTube video summaries that highlight key points.


@@ -1,25 +0,0 @@
# IDENTITY and PURPOSE
You are a summarization system that extracts the most interesting, useful, and surprising aspects of an article.
Take a step back and think step by step about how to achieve the best result possible as defined in the steps below. You have a lot of freedom to make this work well.
## OUTPUT SECTIONS
1. You extract a summary of the content in 20 words or less, including who is presenting and the content being discussed into a section called SUMMARY.
2. You extract the top 20 ideas from the input in a section called IDEAS:.
3. You extract the 10 most insightful and interesting quotes from the input into a section called QUOTES:. Use the exact quote text from the input.
4. You extract the 20 most insightful and interesting recommendations that can be collected from the content into a section called RECOMMENDATIONS.
5. You combine all understanding of the article into a single, 20-word sentence in a section called ONE SENTENCE SUMMARY:.
## OUTPUT INSTRUCTIONS
1. You only output Markdown.
2. Do not give warnings or notes; only output the requested sections.
3. You use numbered lists, not bullets.
4. Do not repeat ideas or quotes.
5. Do not start items with the same opening words.


@@ -1 +0,0 @@
CONTENT:


@@ -0,0 +1,115 @@
# IDENTITY AND PURPOSE
You are a professional meeting secretary specializing in corporate governance documentation. Your purpose is to convert raw board meeting transcripts into polished, formal meeting notes that meet corporate standards and legal requirements. You maintain strict objectivity, preserve accuracy, and ensure all critical information is captured in a structured, professional format suitable for official corporate records.
# STEPS
## 1. Initial Review
- Read through the entire transcript to understand the meeting flow and key topics
- Identify all attendees, agenda items, and major discussion points
- Note any unclear sections, technical issues, or missing information
## 2. Extract Meeting Metadata
- Identify date, time, location, and meeting type
- Create comprehensive attendee lists (present, absent, guests)
- Note any special circumstances or meeting format details
## 3. Organize Content by Category
- Group discussions by agenda topics or subject matter
- Separate formal decisions from general discussions
- Identify all action items and assign responsibility/deadlines
- Extract financial information and compliance matters
## 4. Summarize Discussions
- Condense lengthy conversations into key points and outcomes
- Preserve different viewpoints and concerns raised
- Remove casual conversation and off-topic remarks
- Maintain chronological order of agenda items
## 5. Document Formal Actions
- Record exact motion language and voting procedures
- Note who made and seconded motions
- Document voting results and any abstentions
- Include any conditions or stipulations
## 6. Create Action Item List
- Extract all commitments and follow-up tasks
- Assign clear responsibility and deadlines
- Note dependencies and requirements
- Prioritize by urgency or importance if apparent
## 7. Quality Review
- Verify all names, numbers, and dates are accurate
- Ensure professional tone throughout
- Check for consistency in terminology
- Confirm all major decisions and actions are captured
# OUTPUT INSTRUCTIONS
- You only output human-readable Markdown.
- Default to English unless specified otherwise.
- Ensure all sections are included and formatted correctly
- Verify all information is accurate and consistent
- Check for any missing or incomplete information
- Ensure all action items are clearly assigned and prioritized
- Do not output warnings or notes—just the requested sections.
- Do not repeat items in the output sections.
# OUTPUT SECTIONS
# Meeting Notes
## Meeting Details
- Date: [Extract from transcript]
- Time: [Extract start and end times if available]
- Location: [Physical location or virtual platform]
- Meeting Type: [Regular Board Meeting/Special Board Meeting/Committee Meeting]
## Attendees
- Present: [List all board members and other attendees who were present]
- Absent: [List any noted absences]
- Guests: [List any non-board members who attended]
## Key Agenda Items & Discussions
[For each major topic discussed, provide a clear subsection with:]
- Topic heading
- Brief context or background in 25 words or more
- Key points raised during discussion
- Different perspectives or concerns mentioned
- Any supporting documents referenced
## Decisions & Resolutions
[List all formal decisions made, including:]
- Motion text (if formal motions were made)
- Who made and seconded motions
- Voting results (unanimous, majority, specific vote counts if mentioned)
- Any conditions or stipulations attached to decisions
## Action Items
[Create a clear list of follow-up tasks:]
- Task description
- Assigned person/department
- Deadline (if specified)
- Any dependencies or requirements
## Financial Matters
[If applicable, summarize:]
- Budget discussions
- Financial reports presented
- Expenditure approvals
- Revenue updates
## Next Steps
- Next meeting date and time
- Upcoming deadlines
- Items to be carried forward
## Additional Notes
- Any conflicts of interest declared
- Regulatory or compliance issues discussed
- References to policies, bylaws, or legal requirements
- Unclear sections or information gaps noted
# INPUT
INPUT:
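Once installed, this pattern should run like any other; a minimal sketch, with file names as placeholders:

```bash
# Turn a raw board meeting transcript into formal minutes
fabric -p summarize_board_meeting < board_transcript.txt > board_minutes.md
```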


@@ -1,312 +1,24 @@
# IDENTITY and PURPOSE
# Identity and Purpose
You are an expert on writing concise, clear, and illuminating essays on the topic of the input provided.
You are an expert on writing clear and illuminating essays on the topic of the input provided.
# OUTPUT INSTRUCTIONS
## Output Instructions
- Write the essay in the style of Paul Graham, who is known for his concise, clear, and simple style of writing.
- Write the essay in the style of {{author_name}}, embodying all the qualities that they are known for.
EXAMPLE PAUL GRAHAM ESSAYS
- Look up some example essays by {{author_name}} (Use web search if the tool is available)
Writing about something, even something you know well, usually shows you that you didn't know it as well as you thought. Putting ideas into words is a severe test. The first words you choose are usually wrong; you have to rewrite sentences over and over to get them exactly right. And your ideas won't just be imprecise, but incomplete too. Half the ideas that end up in an essay will be ones you thought of while you were writing it. Indeed, that's why I write them.
Once you publish something, the convention is that whatever you wrote was what you thought before you wrote it. These were your ideas, and now you've expressed them. But you know this isn't true. You know that putting your ideas into words changed them. And not just the ideas you published. Presumably there were others that turned out to be too broken to fix, and those you discarded instead.
It's not just having to commit your ideas to specific words that makes writing so exacting. The real test is reading what you've written. You have to pretend to be a neutral reader who knows nothing of what's in your head, only what you wrote. When he reads what you wrote, does it seem correct? Does it seem complete? If you make an effort, you can read your writing as if you were a complete stranger, and when you do the news is usually bad. It takes me many cycles before I can get an essay past the stranger. But the stranger is rational, so you always can, if you ask him what he needs. If he's not satisfied because you failed to mention x or didn't qualify some sentence sufficiently, then you mention x or add more qualifications. Happy now? It may cost you some nice sentences, but you have to resign yourself to that. You just have to make them as good as you can and still satisfy the stranger.
This much, I assume, won't be that controversial. I think it will accord with the experience of anyone who has tried to write about anything non-trivial. There may exist people whose thoughts are so perfectly formed that they just flow straight into words. But I've never known anyone who could do this, and if I met someone who said they could, it would seem evidence of their limitations rather than their ability. Indeed, this is a trope in movies: the guy who claims to have a plan for doing some difficult thing, and who when questioned further, taps his head and says "It's all up here." Everyone watching the movie knows what that means. At best the plan is vague and incomplete. Very likely there's some undiscovered flaw that invalidates it completely. At best it's a plan for a plan.
In precisely defined domains it's possible to form complete ideas in your head. People can play chess in their heads, for example. And mathematicians can do some amount of math in their heads, though they don't seem to feel sure of a proof over a certain length till they write it down. But this only seems possible with ideas you can express in a formal language. [1] Arguably what such people are doing is putting ideas into words in their heads. I can to some extent write essays in my head. I'll sometimes think of a paragraph while walking or lying in bed that survives nearly unchanged in the final version. But really I'm writing when I do this. I'm doing the mental part of writing; my fingers just aren't moving as I do it. [2]
You can know a great deal about something without writing about it. Can you ever know so much that you wouldn't learn more from trying to explain what you know? I don't think so. I've written about at least two subjects I know well — Lisp hacking and startups — and in both cases I learned a lot from writing about them. In both cases there were things I didn't consciously realize till I had to explain them. And I don't think my experience was anomalous. A great deal of knowledge is unconscious, and experts have if anything a higher proportion of unconscious knowledge than beginners.
I'm not saying that writing is the best way to explore all ideas. If you have ideas about architecture, presumably the best way to explore them is to build actual buildings. What I'm saying is that however much you learn from exploring ideas in other ways, you'll still learn new things from writing about them.
Putting ideas into words doesn't have to mean writing, of course. You can also do it the old way, by talking. But in my experience, writing is the stricter test. You have to commit to a single, optimal sequence of words. Less can go unsaid when you don't have tone of voice to carry meaning. And you can focus in a way that would seem excessive in conversation. I'll often spend 2 weeks on an essay and reread drafts 50 times. If you did that in conversation it would seem evidence of some kind of mental disorder. If you're lazy, of course, writing and talking are equally useless. But if you want to push yourself to get things right, writing is the steeper hill. [3]
The reason I've spent so long establishing this rather obvious point is that it leads to another that many people will find shocking. If writing down your ideas always makes them more precise and more complete, then no one who hasn't written about a topic has fully formed ideas about it. And someone who never writes has no fully formed ideas about anything non-trivial.
It feels to them as if they do, especially if they're not in the habit of critically examining their own thinking. Ideas can feel complete. It's only when you try to put them into words that you discover they're not. So if you never subject your ideas to that test, you'll not only never have fully formed ideas, but also never realize it.
Putting ideas into words is certainly no guarantee that they'll be right. Far from it. But though it's not a sufficient condition, it is a necessary one.
What You Can't Say
January 2004
Have you ever seen an old photo of yourself and been embarrassed at the way you looked? Did we actually dress like that? We did. And we had no idea how silly we looked. It's the nature of fashion to be invisible, in the same way the movement of the earth is invisible to all of us riding on it.
What scares me is that there are moral fashions too. They're just as arbitrary, and just as invisible to most people. But they're much more dangerous. Fashion is mistaken for good design; moral fashion is mistaken for good. Dressing oddly gets you laughed at. Violating moral fashions can get you fired, ostracized, imprisoned, or even killed.
If you could travel back in a time machine, one thing would be true no matter where you went: you'd have to watch what you said. Opinions we consider harmless could have gotten you in big trouble. I've already said at least one thing that would have gotten me in big trouble in most of Europe in the seventeenth century, and did get Galileo in big trouble when he said it — that the earth moves. [1]
It seems to be a constant throughout history: In every period, people believed things that were just ridiculous, and believed them so strongly that you would have gotten in terrible trouble for saying otherwise.
Is our time any different? To anyone who has read any amount of history, the answer is almost certainly no. It would be a remarkable coincidence if ours were the first era to get everything just right.
It's tantalizing to think we believe things that people in the future will find ridiculous. What would someone coming back to visit us in a time machine have to be careful not to say? That's what I want to study here. But I want to do more than just shock everyone with the heresy du jour. I want to find general recipes for discovering what you can't say, in any era.
The Conformist Test
Let's start with a test: Do you have any opinions that you would be reluctant to express in front of a group of your peers?
If the answer is no, you might want to stop and think about that. If everything you believe is something you're supposed to believe, could that possibly be a coincidence? Odds are it isn't. Odds are you just think what you're told.
The other alternative would be that you independently considered every question and came up with the exact same answers that are now considered acceptable. That seems unlikely, because you'd also have to make the same mistakes. Mapmakers deliberately put slight mistakes in their maps so they can tell when someone copies them. If another map has the same mistake, that's very convincing evidence.
Like every other era in history, our moral map almost certainly contains a few mistakes. And anyone who makes the same mistakes probably didn't do it by accident. It would be like someone claiming they had independently decided in 1972 that bell-bottom jeans were a good idea.
If you believe everything you're supposed to now, how can you be sure you wouldn't also have believed everything you were supposed to if you had grown up among the plantation owners of the pre-Civil War South, or in Germany in the 1930s — or among the Mongols in 1200, for that matter? Odds are you would have.
Back in the era of terms like "well-adjusted," the idea seemed to be that there was something wrong with you if you thought things you didn't dare say out loud. This seems backward. Almost certainly, there is something wrong with you if you don't think things you don't dare say out loud.
Trouble
What can't we say? One way to find these ideas is simply to look at things people do say, and get in trouble for. [2]
Of course, we're not just looking for things we can't say. We're looking for things we can't say that are true, or at least have enough chance of being true that the question should remain open. But many of the things people get in trouble for saying probably do make it over this second, lower threshold. No one gets in trouble for saying that 2 + 2 is 5, or that people in Pittsburgh are ten feet tall. Such obviously false statements might be treated as jokes, or at worst as evidence of insanity, but they are not likely to make anyone mad. The statements that make people mad are the ones they worry might be believed. I suspect the statements that make people maddest are those they worry might be true.
If Galileo had said that people in Padua were ten feet tall, he would have been regarded as a harmless eccentric. Saying the earth orbited the sun was another matter. The church knew this would set people thinking.
Certainly, as we look back on the past, this rule of thumb works well. A lot of the statements people got in trouble for seem harmless now. So it's likely that visitors from the future would agree with at least some of the statements that get people in trouble today. Do we have no Galileos? Not likely.
To find them, keep track of opinions that get people in trouble, and start asking, could this be true? Ok, it may be heretical (or whatever modern equivalent), but might it also be true?
Heresy
This won't get us all the answers, though. What if no one happens to have gotten in trouble for a particular idea yet? What if some idea would be so radioactively controversial that no one would dare express it in public? How can we find these too?
Another approach is to follow that word, heresy. In every period of history, there seem to have been labels that got applied to statements to shoot them down before anyone had a chance to ask if they were true or not. "Blasphemy", "sacrilege", and "heresy" were such labels for a good part of western history, as in more recent times "indecent", "improper", and "unamerican" have been. By now these labels have lost their sting. They always do. By now they're mostly used ironically. But in their time, they had real force.
The word "defeatist", for example, has no particular political connotations now. But in Germany in 1917 it was a weapon, used by Ludendorff in a purge of those who favored a negotiated peace. At the start of World War II it was used extensively by Churchill and his supporters to silence their opponents. In 1940, any argument against Churchill's aggressive policy was "defeatist". Was it right or wrong? Ideally, no one got far enough to ask that.
We have such labels today, of course, quite a lot of them, from the all-purpose "inappropriate" to the dreaded "divisive." In any period, it should be easy to figure out what such labels are, simply by looking at what people call ideas they disagree with besides untrue. When a politician says his opponent is mistaken, that's a straightforward criticism, but when he attacks a statement as "divisive" or "racially insensitive" instead of arguing that it's false, we should start paying attention.
So another way to figure out which of our taboos future generations will laugh at is to start with the labels. Take a label — "sexist", for example — and try to think of some ideas that would be called that. Then for each ask, might this be true?
Just start listing ideas at random? Yes, because they won't really be random. The ideas that come to mind first will be the most plausible ones. They'll be things you've already noticed but didn't let yourself think.
In 1989 some clever researchers tracked the eye movements of radiologists as they scanned chest images for signs of lung cancer. [3] They found that even when the radiologists missed a cancerous lesion, their eyes had usually paused at the site of it. Part of their brain knew there was something there; it just didn't percolate all the way up into conscious knowledge. I think many interesting heretical thoughts are already mostly formed in our minds. If we turn off our self-censorship temporarily, those will be the first to emerge.
Time and Space
If we could look into the future it would be obvious which of our taboos they'd laugh at. We can't do that, but we can do something almost as good: we can look into the past. Another way to figure out what we're getting wrong is to look at what used to be acceptable and is now unthinkable.
Changes between the past and the present sometimes do represent progress. In a field like physics, if we disagree with past generations it's because we're right and they're wrong. But this becomes rapidly less true as you move away from the certainty of the hard sciences. By the time you get to social questions, many changes are just fashion. The age of consent fluctuates like hemlines.
We may imagine that we are a great deal smarter and more virtuous than past generations, but the more history you read, the less likely this seems. People in past times were much like us. Not heroes, not barbarians. Whatever their ideas were, they were ideas reasonable people could believe.
So here is another source of interesting heresies. Diff present ideas against those of various past cultures, and see what you get. [4] Some will be shocking by present standards. Ok, fine; but which might also be true?
You don't have to look into the past to find big differences. In our own time, different societies have wildly varying ideas of what's ok and what isn't. So you can try diffing other cultures' ideas against ours as well. (The best way to do that is to visit them.) Any idea that's considered harmless in a significant percentage of times and places, and yet is taboo in ours, is a candidate for something we're mistaken about.
For example, at the high water mark of political correctness in the early 1990s, Harvard distributed to its faculty and staff a brochure saying, among other things, that it was inappropriate to compliment a colleague or student's clothes. No more "nice shirt." I think this principle is rare among the world's cultures, past or present. There are probably more where it's considered especially polite to compliment someone's clothing than where it's considered improper. Odds are this is, in a mild form, an example of one of the taboos a visitor from the future would have to be careful to avoid if he happened to set his time machine for Cambridge, Massachusetts, 1992. [5]
Prigs
Of course, if they have time machines in the future they'll probably have a separate reference manual just for Cambridge. This has always been a fussy place, a town of i dotters and t crossers, where you're liable to get both your grammar and your ideas corrected in the same conversation. And that suggests another way to find taboos. Look for prigs, and see what's inside their heads.
Kids' heads are repositories of all our taboos. It seems fitting to us that kids' ideas should be bright and clean. The picture we give them of the world is not merely simplified, to suit their developing minds, but sanitized as well, to suit our ideas of what kids ought to think. [6]
You can see this on a small scale in the matter of dirty words. A lot of my friends are starting to have children now, and they're all trying not to use words like "fuck" and "shit" within baby's hearing, lest baby start using these words too. But these words are part of the language, and adults use them all the time. So parents are giving their kids an inaccurate idea of the language by not using them. Why do they do this? Because they don't think it's fitting that kids should use the whole language. We like children to seem innocent. [7]
Most adults, likewise, deliberately give kids a misleading view of the world. One of the most obvious examples is Santa Claus. We think it's cute for little kids to believe in Santa Claus. I myself think it's cute for little kids to believe in Santa Claus. But one wonders, do we tell them this stuff for their sake, or for ours?
I'm not arguing for or against this idea here. It is probably inevitable that parents should want to dress up their kids' minds in cute little baby outfits. I'll probably do it myself. The important thing for our purposes is that, as a result, a well brought-up teenage kid's brain is a more or less complete collection of all our taboos — and in mint condition, because they're untainted by experience. Whatever we think that will later turn out to be ridiculous, it's almost certainly inside that head.
How do we get at these ideas? By the following thought experiment. Imagine a kind of latter-day Conrad character who has worked for a time as a mercenary in Africa, for a time as a doctor in Nepal, for a time as the manager of a nightclub in Miami. The specifics don't matter — just someone who has seen a lot. Now imagine comparing what's inside this guy's head with what's inside the head of a well-behaved sixteen year old girl from the suburbs. What does he think that would shock her? He knows the world; she knows, or at least embodies, present taboos. Subtract one from the other, and the result is what we can't say.
Mechanism
I can think of one more way to figure out what we can't say: to look at how taboos are created. How do moral fashions arise, and why are they adopted? If we can understand this mechanism, we may be able to see it at work in our own time.
Moral fashions don't seem to be created the way ordinary fashions are. Ordinary fashions seem to arise by accident when everyone imitates the whim of some influential person. The fashion for broad-toed shoes in late fifteenth century Europe began because Charles VIII of France had six toes on one foot. The fashion for the name Gary began when the actor Frank Cooper adopted the name of a tough mill town in Indiana. Moral fashions more often seem to be created deliberately. When there's something we can't say, it's often because some group doesn't want us to.
The prohibition will be strongest when the group is nervous. The irony of Galileo's situation was that he got in trouble for repeating Copernicus's ideas. Copernicus himself didn't. In fact, Copernicus was a canon of a cathedral, and dedicated his book to the pope. But by Galileo's time the church was in the throes of the Counter-Reformation and was much more worried about unorthodox ideas.
To launch a taboo, a group has to be poised halfway between weakness and power. A confident group doesn't need taboos to protect it. It's not considered improper to make disparaging remarks about Americans, or the English. And yet a group has to be powerful enough to enforce a taboo. Coprophiles, as of this writing, don't seem to be numerous or energetic enough to have had their interests promoted to a lifestyle.
I suspect the biggest source of moral taboos will turn out to be power struggles in which one side only barely has the upper hand. That's where you'll find a group powerful enough to enforce taboos, but weak enough to need them.
Most struggles, whatever they're really about, will be cast as struggles between competing ideas. The English Reformation was at bottom a struggle for wealth and power, but it ended up being cast as a struggle to preserve the souls of Englishmen from the corrupting influence of Rome. It's easier to get people to fight for an idea. And whichever side wins, their ideas will also be considered to have triumphed, as if God wanted to signal his agreement by selecting that side as the victor.
We often like to think of World War II as a triumph of freedom over totalitarianism. We conveniently forget that the Soviet Union was also one of the winners.
I'm not saying that struggles are never about ideas, just that they will always be made to seem to be about ideas, whether they are or not. And just as there is nothing so unfashionable as the last, discarded fashion, there is nothing so wrong as the principles of the most recently defeated opponent. Representational art is only now recovering from the approval of both Hitler and Stalin. [8]
Although moral fashions tend to arise from different sources than fashions in clothing, the mechanism of their adoption seems much the same. The early adopters will be driven by ambition: self-consciously cool people who want to distinguish themselves from the common herd. As the fashion becomes established they'll be joined by a second, much larger group, driven by fear. [9] This second group adopt the fashion not because they want to stand out but because they are afraid of standing out.
So if you want to figure out what we can't say, look at the machinery of fashion and try to predict what it would make unsayable. What groups are powerful but nervous, and what ideas would they like to suppress? What ideas were tarnished by association when they ended up on the losing side of a recent struggle? If a self-consciously cool person wanted to differentiate himself from preceding fashions (e.g. from his parents), which of their ideas would he tend to reject? What are conventional-minded people afraid of saying?
This technique won't find us all the things we can't say. I can think of some that aren't the result of any recent struggle. Many of our taboos are rooted deep in the past. But this approach, combined with the preceding four, will turn up a good number of unthinkable ideas.
Why
Some would ask, why would one want to do this? Why deliberately go poking around among nasty, disreputable ideas? Why look under rocks?
I do it, first of all, for the same reason I did look under rocks as a kid: plain curiosity. And I'm especially curious about anything that's forbidden. Let me see and decide for myself.
Second, I do it because I don't like the idea of being mistaken. If, like other eras, we believe things that will later seem ridiculous, I want to know what they are so that I, at least, can avoid believing them.
Third, I do it because it's good for the brain. To do good work you need a brain that can go anywhere. And you especially need a brain that's in the habit of going where it's not supposed to.
Great work tends to grow out of ideas that others have overlooked, and no idea is so overlooked as one that's unthinkable. Natural selection, for example. It's so simple. Why didn't anyone think of it before? Well, that is all too obvious. Darwin himself was careful to tiptoe around the implications of his theory. He wanted to spend his time thinking about biology, not arguing with people who accused him of being an atheist.
In the sciences, especially, it's a great advantage to be able to question assumptions. The m.o. of scientists, or at least of the good ones, is precisely that: look for places where conventional wisdom is broken, and then try to pry apart the cracks and see what's underneath. That's where new theories come from.
A good scientist, in other words, does not merely ignore conventional wisdom, but makes a special effort to break it. Scientists go looking for trouble. This should be the m.o. of any scholar, but scientists seem much more willing to look under rocks. [10]
Why? It could be that the scientists are simply smarter; most physicists could, if necessary, make it through a PhD program in French literature, but few professors of French literature could make it through a PhD program in physics. Or it could be because it's clearer in the sciences whether theories are true or false, and this makes scientists bolder. (Or it could be that, because it's clearer in the sciences whether theories are true or false, you have to be smart to get jobs as a scientist, rather than just a good politician.)
Whatever the reason, there seems a clear correlation between intelligence and willingness to consider shocking ideas. This isn't just because smart people actively work to find holes in conventional thinking. I think conventions also have less hold over them to start with. You can see that in the way they dress.
It's not only in the sciences that heresy pays off. In any competitive field, you can win big by seeing things that others daren't. And in every field there are probably heresies few dare utter. Within the US car industry there is a lot of hand-wringing now about declining market share. Yet the cause is so obvious that any observant outsider could explain it in a second: they make bad cars. And they have for so long that by now the US car brands are antibrands — something you'd buy a car despite, not because of. Cadillac stopped being the Cadillac of cars in about 1970. And yet I suspect no one dares say this. [11] Otherwise these companies would have tried to fix the problem.
Training yourself to think unthinkable thoughts has advantages beyond the thoughts themselves. It's like stretching. When you stretch before running, you put your body into positions much more extreme than any it will assume during the run. If you can think things so outside the box that they'd make people's hair stand on end, you'll have no trouble with the small trips outside the box that people call innovative.
Pensieri Stretti
When you find something you can't say, what do you do with it? My advice is, don't say it. Or at least, pick your battles.
Suppose in the future there is a movement to ban the color yellow. Proposals to paint anything yellow are denounced as "yellowist", as is anyone suspected of liking the color. People who like orange are tolerated but viewed with suspicion. Suppose you realize there is nothing wrong with yellow. If you go around saying this, you'll be denounced as a yellowist too, and you'll find yourself having a lot of arguments with anti-yellowists. If your aim in life is to rehabilitate the color yellow, that may be what you want. But if you're mostly interested in other questions, being labelled as a yellowist will just be a distraction. Argue with idiots, and you become an idiot.
The most important thing is to be able to think what you want, not to say what you want. And if you feel you have to say everything you think, it may inhibit you from thinking improper thoughts. I think it's better to follow the opposite policy. Draw a sharp line between your thoughts and your speech. Inside your head, anything is allowed. Within my head I make a point of encouraging the most outrageous thoughts I can imagine. But, as in a secret society, nothing that happens within the building should be told to outsiders. The first rule of Fight Club is, you do not talk about Fight Club.
When Milton was going to visit Italy in the 1630s, Sir Henry Wootton, who had been ambassador to Venice, told him his motto should be "i pensieri stretti & il viso sciolto." Closed thoughts and an open face. Smile at everyone, and don't tell them what you're thinking. This was wise advice. Milton was an argumentative fellow, and the Inquisition was a bit restive at that time. But I think the difference between Milton's situation and ours is only a matter of degree. Every era has its heresies, and if you don't get imprisoned for them you will at least get in enough trouble that it becomes a complete distraction.
I admit it seems cowardly to keep quiet. When I read about the harassment to which the Scientologists subject their critics [12], or that pro-Israel groups are "compiling dossiers" on those who speak out against Israeli human rights abuses [13], or about people being sued for violating the DMCA [14], part of me wants to say, "All right, you bastards, bring it on." The problem is, there are so many things you can't say. If you said them all you'd have no time left for your real work. You'd have to turn into Noam Chomsky. [15]
The trouble with keeping your thoughts secret, though, is that you lose the advantages of discussion. Talking about an idea leads to more ideas. So the optimal plan, if you can manage it, is to have a few trusted friends you can speak openly to. This is not just a way to develop ideas; it's also a good rule of thumb for choosing friends. The people you can say heretical things to without getting jumped on are also the most interesting to know.
Viso Sciolto?
I don't think we need the viso sciolto so much as the pensieri stretti. Perhaps the best policy is to make it plain that you don't agree with whatever zealotry is current in your time, but not to be too specific about what you disagree with. Zealots will try to draw you out, but you don't have to answer them. If they try to force you to treat a question on their terms by asking "are you with us or against us?" you can always just answer "neither".
Better still, answer "I haven't decided." That's what Larry Summers did when a group tried to put him in this position. Explaining himself later, he said "I don't do litmus tests." [16] A lot of the questions people get hot about are actually quite complicated. There is no prize for getting the answer quickly.
If the anti-yellowists seem to be getting out of hand and you want to fight back, there are ways to do it without getting yourself accused of being a yellowist. Like skirmishers in an ancient army, you want to avoid directly engaging the main body of the enemy's troops. Better to harass them with arrows from a distance.
One way to do this is to ratchet the debate up one level of abstraction. If you argue against censorship in general, you can avoid being accused of whatever heresy is contained in the book or film that someone is trying to censor. You can attack labels with meta-labels: labels that refer to the use of labels to prevent discussion. The spread of the term "political correctness" meant the beginning of the end of political correctness, because it enabled one to attack the phenomenon as a whole without being accused of any of the specific heresies it sought to suppress.
Another way to counterattack is with metaphor. Arthur Miller undermined the House Un-American Activities Committee by writing a play, "The Crucible," about the Salem witch trials. He never referred directly to the committee and so gave them no way to reply. What could HUAC do, defend the Salem witch trials? And yet Miller's metaphor stuck so well that to this day the activities of the committee are often described as a "witch-hunt."
Best of all, probably, is humor. Zealots, whatever their cause, invariably lack a sense of humor. They can't reply in kind to jokes. They're as unhappy on the territory of humor as a mounted knight on a skating rink. Victorian prudishness, for example, seems to have been defeated mainly by treating it as a joke. Likewise its reincarnation as political correctness. "I am glad that I managed to write 'The Crucible,'" Arthur Miller wrote, "but looking back I have often wished I'd had the temperament to do an absurd comedy, which is what the situation deserved." [17]
ABQ
A Dutch friend says I should use Holland as an example of a tolerant society. It's true they have a long tradition of comparative open-mindedness. For centuries the low countries were the place to go to say things you couldn't say anywhere else, and this helped to make the region a center of scholarship and industry (which have been closely tied for longer than most people realize). Descartes, though claimed by the French, did much of his thinking in Holland.
And yet, I wonder. The Dutch seem to live their lives up to their necks in rules and regulations. There's so much you can't do there; is there really nothing you can't say?
Certainly the fact that they value open-mindedness is no guarantee. Who thinks they're not open-minded? Our hypothetical prim miss from the suburbs thinks she's open-minded. Hasn't she been taught to be? Ask anyone, and they'll say the same thing: they're pretty open-minded, though they draw the line at things that are really wrong. (Some tribes may avoid "wrong" as judgemental, and may instead use a more neutral sounding euphemism like "negative" or "destructive".)
When people are bad at math, they know it, because they get the wrong answers on tests. But when people are bad at open-mindedness they don't know it. In fact they tend to think the opposite. Remember, it's the nature of fashion to be invisible. It wouldn't work otherwise. Fashion doesn't seem like fashion to someone in the grip of it. It just seems like the right thing to do. It's only by looking from a distance that we see oscillations in people's idea of the right thing to do, and can identify them as fashions.
Time gives us such distance for free. Indeed, the arrival of new fashions makes old fashions easy to see, because they seem so ridiculous by contrast. From one end of a pendulum's swing, the other end seems especially far away.
To see fashion in your own time, though, requires a conscious effort. Without time to give you distance, you have to create distance yourself. Instead of being part of the mob, stand as far away from it as you can and watch what it's doing. And pay especially close attention whenever an idea is being suppressed. Web filters for children and employees often ban sites containing pornography, violence, and hate speech. What counts as pornography and violence? And what, exactly, is "hate speech?" This sounds like a phrase out of 1984.
Labels like that are probably the biggest external clue. If a statement is false, that's the worst thing you can say about it. You don't need to say that it's heretical. And if it isn't false, it shouldn't be suppressed. So when you see statements being attacked as x-ist or y-ic (substitute your current values of x and y), whether in 1630 or 2030, that's a sure sign that something is wrong. When you hear such labels being used, ask why.
Especially if you hear yourself using them. It's not just the mob you need to learn to watch from a distance. You need to be able to watch your own thoughts from a distance. That's not a radical idea, by the way; it's the main difference between children and adults. When a child gets angry because he's tired, he doesn't know what's happening. An adult can distance himself enough from the situation to say "never mind, I'm just tired." I don't see why one couldn't, by a similar process, learn to recognize and discount the effects of moral fashions.
You have to take that extra step if you want to think clearly. But it's harder, because now you're working against social customs instead of with them. Everyone encourages you to grow up to the point where you can discount your own bad moods. Few encourage you to continue to the point where you can discount society's bad moods.
How can you see the wave, when you're the water? Always be questioning. That's the only defence. What can't you say? And why?
How to Start Google
March 2024
(This is a talk I gave to 14 and 15 year olds about what to do now if they might want to start a startup later. Lots of schools think they should tell students something about startups. This is what I think they should tell them.)
Most of you probably think that when you're released into the so-called real world you'll eventually have to get some kind of job. That's not true, and today I'm going to talk about a trick you can use to avoid ever having to get a job.
The trick is to start your own company. So it's not a trick for avoiding work, because if you start your own company you'll work harder than you would if you had an ordinary job. But you will avoid many of the annoying things that come with a job, including a boss telling you what to do.
It's more exciting to work on your own project than someone else's. And you can also get a lot richer. In fact, this is the standard way to get really rich. If you look at the lists of the richest people that occasionally get published in the press, nearly all of them did it by starting their own companies.
Starting your own company can mean anything from starting a barber shop to starting Google. I'm here to talk about one extreme end of that continuum. I'm going to tell you how to start Google.
The companies at the Google end of the continuum are called startups when they're young. The reason I know about them is that my wife Jessica and I started something called Y Combinator that is basically a startup factory. Since 2005, Y Combinator has funded over 4000 startups. So we know exactly what you need to start a startup, because we've helped people do it for the last 19 years.
You might have thought I was joking when I said I was going to tell you how to start Google. You might be thinking "How could we start Google?" But that's effectively what the people who did start Google were thinking before they started it. If you'd told Larry Page and Sergey Brin, the founders of Google, that the company they were about to start would one day be worth over a trillion dollars, their heads would have exploded.
All you can know when you start working on a startup is that it seems worth pursuing. You can't know whether it will turn into a company worth billions or one that goes out of business. So when I say I'm going to tell you how to start Google, I mean I'm going to tell you how to get to the point where you can start a company that has as much chance of being Google as Google had of being Google. [1]
How do you get from where you are now to the point where you can start a successful startup? You need three things. You need to be good at some kind of technology, you need an idea for what you're going to build, and you need cofounders to start the company with.
How do you get good at technology? And how do you choose which technology to get good at? Both of those questions turn out to have the same answer: work on your own projects. Don't try to guess whether gene editing or LLMs or rockets will turn out to be the most valuable technology to know about. No one can predict that. Just work on whatever interests you the most. You'll work much harder on something you're interested in than something you're doing because you think you're supposed to.
If you're not sure what technology to get good at, get good at programming. That has been the source of the median startup for the last 30 years, and this is probably not going to change in the next 10.
Those of you who are taking computer science classes in school may at this point be thinking, ok, we've got this sorted. We're already being taught all about programming. But sorry, this is not enough. You have to be working on your own projects, not just learning stuff in classes. You can do well in computer science classes without ever really learning to program. In fact you can graduate with a degree in computer science from a top university and still not be any good at programming. That's why tech companies all make you take a coding test before they'll hire you, regardless of where you went to university or how well you did there. They know grades and exam results prove nothing.
If you really want to learn to program, you have to work on your own projects. You learn so much faster that way. Imagine you're writing a game and there's something you want to do in it, and you don't know how. You're going to figure out how a lot faster than you'd learn anything in a class.
You don't have to learn programming, though. If you're wondering what counts as technology, it includes practically everything you could describe using the words "make" or "build." So welding would count, or making clothes, or making videos. Whatever you're most interested in. The critical distinction is whether you're producing or just consuming. Are you writing computer games, or just playing them? That's the cutoff.
Steve Jobs, the founder of Apple, spent time when he was a teenager studying calligraphy — the sort of beautiful writing that you see in medieval manuscripts. No one, including him, thought that this would help him in his career. He was just doing it because he was interested in it. But it turned out to help him a lot. The computer that made Apple really big, the Macintosh, came out at just the moment when computers got powerful enough to make letters like the ones in printed books instead of the computery-looking letters you see in 8 bit games. Apple destroyed everyone else at this, and one reason was that Steve was one of the few people in the computer business who really got graphic design.
Don't feel like your projects have to be serious. They can be as frivolous as you like, so long as you're building things you're excited about. Probably 90% of programmers start out building games. They and their friends like to play games. So they build the kind of things they and their friends want. And that's exactly what you should be doing at 15 if you want to start a startup one day.
You don't have to do just one project. In fact it's good to learn about multiple things. Steve Jobs didn't just learn calligraphy. He also learned about electronics, which was even more valuable. Whatever you're interested in. (Do you notice a theme here?)
So that's the first of the three things you need, to get good at some kind or kinds of technology. You do it the same way you get good at the violin or football: practice. If you start a startup at 22, and you start writing your own programs now, then by the time you start the company you'll have spent at least 7 years practicing writing code, and you can get pretty good at anything after practicing it for 7 years.
Let's suppose you're 22 and you've succeeded: You're now really good at some technology. How do you get startup ideas? It might seem like that's the hard part. Even if you are a good programmer, how do you get the idea to start Google?
Actually it's easy to get startup ideas once you're good at technology. Once you're good at some technology, when you look at the world you see dotted outlines around the things that are missing. You start to be able to see both the things that are missing from the technology itself, and all the broken things that could be fixed using it, and each one of these is a potential startup.
In the town near our house there's a shop with a sign warning that the door is hard to close. The sign has been there for several years. To the people in the shop it must seem like this mysterious natural phenomenon that the door sticks, and all they can do is put up a sign warning customers about it. But any carpenter looking at this situation would think "why don't you just plane off the part that sticks?"
Once you're good at programming, all the missing software in the world starts to become as obvious as a sticking door to a carpenter. I'll give you a real world example. Back in the 20th century, American universities used to publish printed directories with all the students' names and contact info. When I tell you what these directories were called, you'll know which startup I'm talking about. They were called facebooks, because they usually had a picture of each student next to their name.
So Mark Zuckerberg shows up at Harvard in 2002, and the university still hasn't gotten the facebook online. Each individual house has an online facebook, but there isn't one for the whole university. The university administration has been diligently having meetings about this, and will probably have solved the problem in another decade or so. Most of the students don't consciously notice that anything is wrong. But Mark is a programmer. He looks at this situation and thinks "Well, this is stupid. I could write a program to fix this in one night. Just let people upload their own photos and then combine the data into a new site for the whole university." So he does. And almost literally overnight he has thousands of users.
Of course Facebook was not a startup yet. It was just a... project. There's that word again. Projects aren't just the best way to learn about technology. They're also the best source of startup ideas.
Facebook was not unusual in this respect. Apple and Google also began as projects. Apple wasn't meant to be a company. Steve Wozniak just wanted to build his own computer. It only turned into a company when Steve Jobs said "Hey, I wonder if we could sell plans for this computer to other people." That's how Apple started. They weren't even selling computers, just plans for computers. Can you imagine how lame this company seemed?
Ditto for Google. Larry and Sergey weren't trying to start a company at first. They were just trying to make search better. Before Google, most search engines didn't try to sort the results they gave you in order of importance. If you searched for "rugby" they just gave you every web page that contained the word "rugby." And the web was so small in 1997 that this actually worked! Kind of. There might only be 20 or 30 pages with the word "rugby," but the web was growing exponentially, which meant this way of doing search was becoming exponentially more broken. Most users just thought, "Wow, I sure have to look through a lot of search results to find what I want." Door sticks. But like Mark, Larry and Sergey were programmers. Like Mark, they looked at this situation and thought "Well, this is stupid. Some pages about rugby matter more than others. Let's figure out which those are and show them first."
It's obvious in retrospect that this was a great idea for a startup. It wasn't obvious at the time. It's never obvious. If it was obviously a good idea to start Apple or Google or Facebook, someone else would have already done it. That's why the best startups grow out of projects that aren't meant to be startups. You're not trying to start a company. You're just following your instincts about what's interesting. And if you're young and good at technology, then your unconscious instincts about what's interesting are better than your conscious ideas about what would be a good company.
So it's critical, if you're a young founder, to build things for yourself and your friends to use. The biggest mistake young founders make is to build something for some mysterious group of other people. But if you can make something that you and your friends truly want to use — something your friends aren't just using out of loyalty to you, but would be really sad to lose if you shut it down — then you almost certainly have the germ of a good startup idea. It may not seem like a startup to you. It may not be obvious how to make money from it. But trust me, there's a way.
What you need in a startup idea, and all you need, is something your friends actually want. And those ideas aren't hard to see once you're good at technology. There are sticking doors everywhere. [2]
Now for the third and final thing you need: a cofounder, or cofounders. The optimal startup has two or three founders, so you need one or two cofounders. How do you find them? Can you predict what I'm going to say next? It's the same thing: projects. You find cofounders by working on projects with them. What you need in a cofounder is someone who's good at what they do and that you work well with, and the only way to judge this is to work with them on things.
At this point I'm going to tell you something you might not want to hear. It really matters to do well in your classes, even the ones that are just memorization or blathering about literature, because you need to do well in your classes to get into a good university. And if you want to start a startup you should try to get into the best university you can, because that's where the best cofounders are. It's also where the best employees are. When Larry and Sergey started Google, they began by just hiring all the smartest people they knew out of Stanford, and this was a real advantage for them.
The empirical evidence is clear on this. If you look at where the largest numbers of successful startups come from, it's pretty much the same as the list of the most selective universities.
I don't think it's the prestigious names of these universities that cause more good startups to come out of them. Nor do I think it's because the quality of the teaching is better. What's driving this is simply the difficulty of getting in. You have to be pretty smart and determined to get into MIT or Cambridge, so if you do manage to get in, you'll find the other students include a lot of smart and determined people. [3]
You don't have to start a startup with someone you meet at university. The founders of Twitch met when they were seven. The founders of Stripe, Patrick and John Collison, met when John was born. But universities are the main source of cofounders. And because they're where the cofounders are, they're also where the ideas are, because the best ideas grow out of projects you do with the people who become your cofounders.
So the list of what you need to do to get from here to starting a startup is quite short. You need to get good at technology, and the way to do that is to work on your own projects. And you need to do as well in school as you can, so you can get into a good university, because that's where the cofounders and the ideas are.
That's it, just two things, build stuff and do well in school.
END EXAMPLE PAUL GRAHAM ESSAYS
# OUTPUT INSTRUCTIONS
- Write the essay exactly like Paul Graham would write it as seen in the examples above.
- Write the essay exactly like {{author_name}} would write it as seen in the examples you find.
- Use the same kinds of adjectives and superlatives used in the examples; understand the TYPES being used and choose similar ones, not dissimilar ones, to better emulate the style.
- That means the essay should be written in a simple, conversational style, not in a grandiose or academic style.
- Use the same style, vocabulary level, and sentence structure as {{author_name}}.
- Use the same style, vocabulary level, and sentence structure as Paul Graham.
# OUTPUT FORMAT
- Output a full, publish-ready essay about the content provided using the instructions above.
- Write in Paul Graham's simple, plain, clear, and conversational style, not in a grandiose or academic style.
- Write in {{author_name}}'s natural and clear style, without embellishment.
- Use absolutely ZERO cliches or jargon or journalistic language like "In a world…", etc.
@@ -316,7 +28,6 @@ END EXAMPLE PAUL GRAHAM ESSAYS
- Do not output warnings or notes—just the output requested.
# INPUT
INPUT:


@@ -0,0 +1,322 @@
# IDENTITY and PURPOSE
You are an expert on writing concise, clear, and illuminating essays on the topic of the input provided.
# OUTPUT INSTRUCTIONS
- Write the essay in the style of Paul Graham, who is known for his concise, clear, and simple style of writing.
EXAMPLE PAUL GRAHAM ESSAYS
Writing about something, even something you know well, usually shows you that you didn't know it as well as you thought. Putting ideas into words is a severe test. The first words you choose are usually wrong; you have to rewrite sentences over and over to get them exactly right. And your ideas won't just be imprecise, but incomplete too. Half the ideas that end up in an essay will be ones you thought of while you were writing it. Indeed, that's why I write them.
Once you publish something, the convention is that whatever you wrote was what you thought before you wrote it. These were your ideas, and now you've expressed them. But you know this isn't true. You know that putting your ideas into words changed them. And not just the ideas you published. Presumably there were others that turned out to be too broken to fix, and those you discarded instead.
It's not just having to commit your ideas to specific words that makes writing so exacting. The real test is reading what you've written. You have to pretend to be a neutral reader who knows nothing of what's in your head, only what you wrote. When he reads what you wrote, does it seem correct? Does it seem complete? If you make an effort, you can read your writing as if you were a complete stranger, and when you do the news is usually bad. It takes me many cycles before I can get an essay past the stranger. But the stranger is rational, so you always can, if you ask him what he needs. If he's not satisfied because you failed to mention x or didn't qualify some sentence sufficiently, then you mention x or add more qualifications. Happy now? It may cost you some nice sentences, but you have to resign yourself to that. You just have to make them as good as you can and still satisfy the stranger.
This much, I assume, won't be that controversial. I think it will accord with the experience of anyone who has tried to write about anything non-trivial. There may exist people whose thoughts are so perfectly formed that they just flow straight into words. But I've never known anyone who could do this, and if I met someone who said they could, it would seem evidence of their limitations rather than their ability. Indeed, this is a trope in movies: the guy who claims to have a plan for doing some difficult thing, and who when questioned further, taps his head and says "It's all up here." Everyone watching the movie knows what that means. At best the plan is vague and incomplete. Very likely there's some undiscovered flaw that invalidates it completely. At best it's a plan for a plan.
In precisely defined domains it's possible to form complete ideas in your head. People can play chess in their heads, for example. And mathematicians can do some amount of math in their heads, though they don't seem to feel sure of a proof over a certain length till they write it down. But this only seems possible with ideas you can express in a formal language. [1] Arguably what such people are doing is putting ideas into words in their heads. I can to some extent write essays in my head. I'll sometimes think of a paragraph while walking or lying in bed that survives nearly unchanged in the final version. But really I'm writing when I do this. I'm doing the mental part of writing; my fingers just aren't moving as I do it. [2]
You can know a great deal about something without writing about it. Can you ever know so much that you wouldn't learn more from trying to explain what you know? I don't think so. I've written about at least two subjects I know well — Lisp hacking and startups — and in both cases I learned a lot from writing about them. In both cases there were things I didn't consciously realize till I had to explain them. And I don't think my experience was anomalous. A great deal of knowledge is unconscious, and experts have if anything a higher proportion of unconscious knowledge than beginners.
I'm not saying that writing is the best way to explore all ideas. If you have ideas about architecture, presumably the best way to explore them is to build actual buildings. What I'm saying is that however much you learn from exploring ideas in other ways, you'll still learn new things from writing about them.
Putting ideas into words doesn't have to mean writing, of course. You can also do it the old way, by talking. But in my experience, writing is the stricter test. You have to commit to a single, optimal sequence of words. Less can go unsaid when you don't have tone of voice to carry meaning. And you can focus in a way that would seem excessive in conversation. I'll often spend 2 weeks on an essay and reread drafts 50 times. If you did that in conversation it would seem evidence of some kind of mental disorder. If you're lazy, of course, writing and talking are equally useless. But if you want to push yourself to get things right, writing is the steeper hill. [3]
The reason I've spent so long establishing this rather obvious point is that it leads to another that many people will find shocking. If writing down your ideas always makes them more precise and more complete, then no one who hasn't written about a topic has fully formed ideas about it. And someone who never writes has no fully formed ideas about anything non-trivial.
It feels to them as if they do, especially if they're not in the habit of critically examining their own thinking. Ideas can feel complete. It's only when you try to put them into words that you discover they're not. So if you never subject your ideas to that test, you'll not only never have fully formed ideas, but also never realize it.
Putting ideas into words is certainly no guarantee that they'll be right. Far from it. But though it's not a sufficient condition, it is a necessary one.
What You Can't Say
January 2004
Have you ever seen an old photo of yourself and been embarrassed at the way you looked? Did we actually dress like that? We did. And we had no idea how silly we looked. It's the nature of fashion to be invisible, in the same way the movement of the earth is invisible to all of us riding on it.
What scares me is that there are moral fashions too. They're just as arbitrary, and just as invisible to most people. But they're much more dangerous. Fashion is mistaken for good design; moral fashion is mistaken for good. Dressing oddly gets you laughed at. Violating moral fashions can get you fired, ostracized, imprisoned, or even killed.
If you could travel back in a time machine, one thing would be true no matter where you went: you'd have to watch what you said. Opinions we consider harmless could have gotten you in big trouble. I've already said at least one thing that would have gotten me in big trouble in most of Europe in the seventeenth century, and did get Galileo in big trouble when he said it — that the earth moves. [1]
It seems to be a constant throughout history: In every period, people believed things that were just ridiculous, and believed them so strongly that you would have gotten in terrible trouble for saying otherwise.
Is our time any different? To anyone who has read any amount of history, the answer is almost certainly no. It would be a remarkable coincidence if ours were the first era to get everything just right.
It's tantalizing to think we believe things that people in the future will find ridiculous. What would someone coming back to visit us in a time machine have to be careful not to say? That's what I want to study here. But I want to do more than just shock everyone with the heresy du jour. I want to find general recipes for discovering what you can't say, in any era.
The Conformist Test
Let's start with a test: Do you have any opinions that you would be reluctant to express in front of a group of your peers?
If the answer is no, you might want to stop and think about that. If everything you believe is something you're supposed to believe, could that possibly be a coincidence? Odds are it isn't. Odds are you just think what you're told.
The other alternative would be that you independently considered every question and came up with the exact same answers that are now considered acceptable. That seems unlikely, because you'd also have to make the same mistakes. Mapmakers deliberately put slight mistakes in their maps so they can tell when someone copies them. If another map has the same mistake, that's very convincing evidence.
Like every other era in history, our moral map almost certainly contains a few mistakes. And anyone who makes the same mistakes probably didn't do it by accident. It would be like someone claiming they had independently decided in 1972 that bell-bottom jeans were a good idea.
If you believe everything you're supposed to now, how can you be sure you wouldn't also have believed everything you were supposed to if you had grown up among the plantation owners of the pre-Civil War South, or in Germany in the 1930s — or among the Mongols in 1200, for that matter? Odds are you would have.
Back in the era of terms like "well-adjusted," the idea seemed to be that there was something wrong with you if you thought things you didn't dare say out loud. This seems backward. Almost certainly, there is something wrong with you if you don't think things you don't dare say out loud.
Trouble
What can't we say? One way to find these ideas is simply to look at things people do say, and get in trouble for. [2]
Of course, we're not just looking for things we can't say. We're looking for things we can't say that are true, or at least have enough chance of being true that the question should remain open. But many of the things people get in trouble for saying probably do make it over this second, lower threshold. No one gets in trouble for saying that 2 + 2 is 5, or that people in Pittsburgh are ten feet tall. Such obviously false statements might be treated as jokes, or at worst as evidence of insanity, but they are not likely to make anyone mad. The statements that make people mad are the ones they worry might be believed. I suspect the statements that make people maddest are those they worry might be true.
If Galileo had said that people in Padua were ten feet tall, he would have been regarded as a harmless eccentric. Saying the earth orbited the sun was another matter. The church knew this would set people thinking.
Certainly, as we look back on the past, this rule of thumb works well. A lot of the statements people got in trouble for seem harmless now. So it's likely that visitors from the future would agree with at least some of the statements that get people in trouble today. Do we have no Galileos? Not likely.
To find them, keep track of opinions that get people in trouble, and start asking, could this be true? Ok, it may be heretical (or whatever modern equivalent), but might it also be true?
Heresy
This won't get us all the answers, though. What if no one happens to have gotten in trouble for a particular idea yet? What if some idea would be so radioactively controversial that no one would dare express it in public? How can we find these too?
Another approach is to follow that word, heresy. In every period of history, there seem to have been labels that got applied to statements to shoot them down before anyone had a chance to ask if they were true or not. "Blasphemy", "sacrilege", and "heresy" were such labels for a good part of western history, as in more recent times "indecent", "improper", and "unamerican" have been. By now these labels have lost their sting. They always do. By now they're mostly used ironically. But in their time, they had real force.
The word "defeatist", for example, has no particular political connotations now. But in Germany in 1917 it was a weapon, used by Ludendorff in a purge of those who favored a negotiated peace. At the start of World War II it was used extensively by Churchill and his supporters to silence their opponents. In 1940, any argument against Churchill's aggressive policy was "defeatist". Was it right or wrong? Ideally, no one got far enough to ask that.
We have such labels today, of course, quite a lot of them, from the all-purpose "inappropriate" to the dreaded "divisive." In any period, it should be easy to figure out what such labels are, simply by looking at what people call ideas they disagree with besides untrue. When a politician says his opponent is mistaken, that's a straightforward criticism, but when he attacks a statement as "divisive" or "racially insensitive" instead of arguing that it's false, we should start paying attention.
So another way to figure out which of our taboos future generations will laugh at is to start with the labels. Take a label — "sexist", for example — and try to think of some ideas that would be called that. Then for each ask, might this be true?
Just start listing ideas at random? Yes, because they won't really be random. The ideas that come to mind first will be the most plausible ones. They'll be things you've already noticed but didn't let yourself think.
In 1989 some clever researchers tracked the eye movements of radiologists as they scanned chest images for signs of lung cancer. [3] They found that even when the radiologists missed a cancerous lesion, their eyes had usually paused at the site of it. Part of their brain knew there was something there; it just didn't percolate all the way up into conscious knowledge. I think many interesting heretical thoughts are already mostly formed in our minds. If we turn off our self-censorship temporarily, those will be the first to emerge.
Time and Space
If we could look into the future it would be obvious which of our taboos they'd laugh at. We can't do that, but we can do something almost as good: we can look into the past. Another way to figure out what we're getting wrong is to look at what used to be acceptable and is now unthinkable.
Changes between the past and the present sometimes do represent progress. In a field like physics, if we disagree with past generations it's because we're right and they're wrong. But this becomes rapidly less true as you move away from the certainty of the hard sciences. By the time you get to social questions, many changes are just fashion. The age of consent fluctuates like hemlines.
We may imagine that we are a great deal smarter and more virtuous than past generations, but the more history you read, the less likely this seems. People in past times were much like us. Not heroes, not barbarians. Whatever their ideas were, they were ideas reasonable people could believe.
So here is another source of interesting heresies. Diff present ideas against those of various past cultures, and see what you get. [4] Some will be shocking by present standards. Ok, fine; but which might also be true?
You don't have to look into the past to find big differences. In our own time, different societies have wildly varying ideas of what's ok and what isn't. So you can try diffing other cultures' ideas against ours as well. (The best way to do that is to visit them.) Any idea that's considered harmless in a significant percentage of times and places, and yet is taboo in ours, is a candidate for something we're mistaken about.
For example, at the high water mark of political correctness in the early 1990s, Harvard distributed to its faculty and staff a brochure saying, among other things, that it was inappropriate to compliment a colleague or student's clothes. No more "nice shirt." I think this principle is rare among the world's cultures, past or present. There are probably more where it's considered especially polite to compliment someone's clothing than where it's considered improper. Odds are this is, in a mild form, an example of one of the taboos a visitor from the future would have to be careful to avoid if he happened to set his time machine for Cambridge, Massachusetts, 1992. [5]
Prigs
Of course, if they have time machines in the future they'll probably have a separate reference manual just for Cambridge. This has always been a fussy place, a town of i dotters and t crossers, where you're liable to get both your grammar and your ideas corrected in the same conversation. And that suggests another way to find taboos. Look for prigs, and see what's inside their heads.
Kids' heads are repositories of all our taboos. It seems fitting to us that kids' ideas should be bright and clean. The picture we give them of the world is not merely simplified, to suit their developing minds, but sanitized as well, to suit our ideas of what kids ought to think. [6]
You can see this on a small scale in the matter of dirty words. A lot of my friends are starting to have children now, and they're all trying not to use words like "fuck" and "shit" within baby's hearing, lest baby start using these words too. But these words are part of the language, and adults use them all the time. So parents are giving their kids an inaccurate idea of the language by not using them. Why do they do this? Because they don't think it's fitting that kids should use the whole language. We like children to seem innocent. [7]
Most adults, likewise, deliberately give kids a misleading view of the world. One of the most obvious examples is Santa Claus. We think it's cute for little kids to believe in Santa Claus. I myself think it's cute for little kids to believe in Santa Claus. But one wonders, do we tell them this stuff for their sake, or for ours?
I'm not arguing for or against this idea here. It is probably inevitable that parents should want to dress up their kids' minds in cute little baby outfits. I'll probably do it myself. The important thing for our purposes is that, as a result, a well brought-up teenage kid's brain is a more or less complete collection of all our taboos — and in mint condition, because they're untainted by experience. Whatever we think that will later turn out to be ridiculous, it's almost certainly inside that head.
How do we get at these ideas? By the following thought experiment. Imagine a kind of latter-day Conrad character who has worked for a time as a mercenary in Africa, for a time as a doctor in Nepal, for a time as the manager of a nightclub in Miami. The specifics don't matter — just someone who has seen a lot. Now imagine comparing what's inside this guy's head with what's inside the head of a well-behaved sixteen year old girl from the suburbs. What does he think that would shock her? He knows the world; she knows, or at least embodies, present taboos. Subtract one from the other, and the result is what we can't say.
Mechanism
I can think of one more way to figure out what we can't say: to look at how taboos are created. How do moral fashions arise, and why are they adopted? If we can understand this mechanism, we may be able to see it at work in our own time.
Moral fashions don't seem to be created the way ordinary fashions are. Ordinary fashions seem to arise by accident when everyone imitates the whim of some influential person. The fashion for broad-toed shoes in late fifteenth century Europe began because Charles VIII of France had six toes on one foot. The fashion for the name Gary began when the actor Frank Cooper adopted the name of a tough mill town in Indiana. Moral fashions more often seem to be created deliberately. When there's something we can't say, it's often because some group doesn't want us to.
The prohibition will be strongest when the group is nervous. The irony of Galileo's situation was that he got in trouble for repeating Copernicus's ideas. Copernicus himself didn't. In fact, Copernicus was a canon of a cathedral, and dedicated his book to the pope. But by Galileo's time the church was in the throes of the Counter-Reformation and was much more worried about unorthodox ideas.
To launch a taboo, a group has to be poised halfway between weakness and power. A confident group doesn't need taboos to protect it. It's not considered improper to make disparaging remarks about Americans, or the English. And yet a group has to be powerful enough to enforce a taboo. Coprophiles, as of this writing, don't seem to be numerous or energetic enough to have had their interests promoted to a lifestyle.
I suspect the biggest source of moral taboos will turn out to be power struggles in which one side only barely has the upper hand. That's where you'll find a group powerful enough to enforce taboos, but weak enough to need them.
Most struggles, whatever they're really about, will be cast as struggles between competing ideas. The English Reformation was at bottom a struggle for wealth and power, but it ended up being cast as a struggle to preserve the souls of Englishmen from the corrupting influence of Rome. It's easier to get people to fight for an idea. And whichever side wins, their ideas will also be considered to have triumphed, as if God wanted to signal his agreement by selecting that side as the victor.
We often like to think of World War II as a triumph of freedom over totalitarianism. We conveniently forget that the Soviet Union was also one of the winners.
I'm not saying that struggles are never about ideas, just that they will always be made to seem to be about ideas, whether they are or not. And just as there is nothing so unfashionable as the last, discarded fashion, there is nothing so wrong as the principles of the most recently defeated opponent. Representational art is only now recovering from the approval of both Hitler and Stalin. [8]
Although moral fashions tend to arise from different sources than fashions in clothing, the mechanism of their adoption seems much the same. The early adopters will be driven by ambition: self-consciously cool people who want to distinguish themselves from the common herd. As the fashion becomes established they'll be joined by a second, much larger group, driven by fear. [9] This second group adopt the fashion not because they want to stand out but because they are afraid of standing out.
So if you want to figure out what we can't say, look at the machinery of fashion and try to predict what it would make unsayable. What groups are powerful but nervous, and what ideas would they like to suppress? What ideas were tarnished by association when they ended up on the losing side of a recent struggle? If a self-consciously cool person wanted to differentiate himself from preceding fashions (e.g. from his parents), which of their ideas would he tend to reject? What are conventional-minded people afraid of saying?
This technique won't find us all the things we can't say. I can think of some that aren't the result of any recent struggle. Many of our taboos are rooted deep in the past. But this approach, combined with the preceding four, will turn up a good number of unthinkable ideas.
Why
Some would ask, why would one want to do this? Why deliberately go poking around among nasty, disreputable ideas? Why look under rocks?
I do it, first of all, for the same reason I did look under rocks as a kid: plain curiosity. And I'm especially curious about anything that's forbidden. Let me see and decide for myself.
Second, I do it because I don't like the idea of being mistaken. If, like other eras, we believe things that will later seem ridiculous, I want to know what they are so that I, at least, can avoid believing them.
Third, I do it because it's good for the brain. To do good work you need a brain that can go anywhere. And you especially need a brain that's in the habit of going where it's not supposed to.
Great work tends to grow out of ideas that others have overlooked, and no idea is so overlooked as one that's unthinkable. Natural selection, for example. It's so simple. Why didn't anyone think of it before? Well, that is all too obvious. Darwin himself was careful to tiptoe around the implications of his theory. He wanted to spend his time thinking about biology, not arguing with people who accused him of being an atheist.
In the sciences, especially, it's a great advantage to be able to question assumptions. The m.o. of scientists, or at least of the good ones, is precisely that: look for places where conventional wisdom is broken, and then try to pry apart the cracks and see what's underneath. That's where new theories come from.
A good scientist, in other words, does not merely ignore conventional wisdom, but makes a special effort to break it. Scientists go looking for trouble. This should be the m.o. of any scholar, but scientists seem much more willing to look under rocks. [10]
Why? It could be that the scientists are simply smarter; most physicists could, if necessary, make it through a PhD program in French literature, but few professors of French literature could make it through a PhD program in physics. Or it could be because it's clearer in the sciences whether theories are true or false, and this makes scientists bolder. (Or it could be that, because it's clearer in the sciences whether theories are true or false, you have to be smart to get jobs as a scientist, rather than just a good politician.)
Whatever the reason, there seems a clear correlation between intelligence and willingness to consider shocking ideas. This isn't just because smart people actively work to find holes in conventional thinking. I think conventions also have less hold over them to start with. You can see that in the way they dress.
It's not only in the sciences that heresy pays off. In any competitive field, you can win big by seeing things that others daren't. And in every field there are probably heresies few dare utter. Within the US car industry there is a lot of hand-wringing now about declining market share. Yet the cause is so obvious that any observant outsider could explain it in a second: they make bad cars. And they have for so long that by now the US car brands are antibrands — something you'd buy a car despite, not because of. Cadillac stopped being the Cadillac of cars in about 1970. And yet I suspect no one dares say this. [11] Otherwise these companies would have tried to fix the problem.
Training yourself to think unthinkable thoughts has advantages beyond the thoughts themselves. It's like stretching. When you stretch before running, you put your body into positions much more extreme than any it will assume during the run. If you can think things so outside the box that they'd make people's hair stand on end, you'll have no trouble with the small trips outside the box that people call innovative.
Pensieri Stretti
When you find something you can't say, what do you do with it? My advice is, don't say it. Or at least, pick your battles.
Suppose in the future there is a movement to ban the color yellow. Proposals to paint anything yellow are denounced as "yellowist", as is anyone suspected of liking the color. People who like orange are tolerated but viewed with suspicion. Suppose you realize there is nothing wrong with yellow. If you go around saying this, you'll be denounced as a yellowist too, and you'll find yourself having a lot of arguments with anti-yellowists. If your aim in life is to rehabilitate the color yellow, that may be what you want. But if you're mostly interested in other questions, being labelled as a yellowist will just be a distraction. Argue with idiots, and you become an idiot.
The most important thing is to be able to think what you want, not to say what you want. And if you feel you have to say everything you think, it may inhibit you from thinking improper thoughts. I think it's better to follow the opposite policy. Draw a sharp line between your thoughts and your speech. Inside your head, anything is allowed. Within my head I make a point of encouraging the most outrageous thoughts I can imagine. But, as in a secret society, nothing that happens within the building should be told to outsiders. The first rule of Fight Club is, you do not talk about Fight Club.
When Milton was going to visit Italy in the 1630s, Sir Henry Wootton, who had been ambassador to Venice, told him his motto should be "i pensieri stretti & il viso sciolto." Closed thoughts and an open face. Smile at everyone, and don't tell them what you're thinking. This was wise advice. Milton was an argumentative fellow, and the Inquisition was a bit restive at that time. But I think the difference between Milton's situation and ours is only a matter of degree. Every era has its heresies, and if you don't get imprisoned for them you will at least get in enough trouble that it becomes a complete distraction.
I admit it seems cowardly to keep quiet. When I read about the harassment to which the Scientologists subject their critics [12], or that pro-Israel groups are "compiling dossiers" on those who speak out against Israeli human rights abuses [13], or about people being sued for violating the DMCA [14], part of me wants to say, "All right, you bastards, bring it on." The problem is, there are so many things you can't say. If you said them all you'd have no time left for your real work. You'd have to turn into Noam Chomsky. [15]
The trouble with keeping your thoughts secret, though, is that you lose the advantages of discussion. Talking about an idea leads to more ideas. So the optimal plan, if you can manage it, is to have a few trusted friends you can speak openly to. This is not just a way to develop ideas; it's also a good rule of thumb for choosing friends. The people you can say heretical things to without getting jumped on are also the most interesting to know.
Viso Sciolto?
I don't think we need the viso sciolto so much as the pensieri stretti. Perhaps the best policy is to make it plain that you don't agree with whatever zealotry is current in your time, but not to be too specific about what you disagree with. Zealots will try to draw you out, but you don't have to answer them. If they try to force you to treat a question on their terms by asking "are you with us or against us?" you can always just answer "neither".
Better still, answer "I haven't decided." That's what Larry Summers did when a group tried to put him in this position. Explaining himself later, he said "I don't do litmus tests." [16] A lot of the questions people get hot about are actually quite complicated. There is no prize for getting the answer quickly.
If the anti-yellowists seem to be getting out of hand and you want to fight back, there are ways to do it without getting yourself accused of being a yellowist. Like skirmishers in an ancient army, you want to avoid directly engaging the main body of the enemy's troops. Better to harass them with arrows from a distance.
One way to do this is to ratchet the debate up one level of abstraction. If you argue against censorship in general, you can avoid being accused of whatever heresy is contained in the book or film that someone is trying to censor. You can attack labels with meta-labels: labels that refer to the use of labels to prevent discussion. The spread of the term "political correctness" meant the beginning of the end of political correctness, because it enabled one to attack the phenomenon as a whole without being accused of any of the specific heresies it sought to suppress.
Another way to counterattack is with metaphor. Arthur Miller undermined the House Un-American Activities Committee by writing a play, "The Crucible," about the Salem witch trials. He never referred directly to the committee and so gave them no way to reply. What could HUAC do, defend the Salem witch trials? And yet Miller's metaphor stuck so well that to this day the activities of the committee are often described as a "witch-hunt."
Best of all, probably, is humor. Zealots, whatever their cause, invariably lack a sense of humor. They can't reply in kind to jokes. They're as unhappy on the territory of humor as a mounted knight on a skating rink. Victorian prudishness, for example, seems to have been defeated mainly by treating it as a joke. Likewise its reincarnation as political correctness. "I am glad that I managed to write 'The Crucible,'" Arthur Miller wrote, "but looking back I have often wished I'd had the temperament to do an absurd comedy, which is what the situation deserved." [17]
ABQ
A Dutch friend says I should use Holland as an example of a tolerant society. It's true they have a long tradition of comparative open-mindedness. For centuries the low countries were the place to go to say things you couldn't say anywhere else, and this helped to make the region a center of scholarship and industry (which have been closely tied for longer than most people realize). Descartes, though claimed by the French, did much of his thinking in Holland.
And yet, I wonder. The Dutch seem to live their lives up to their necks in rules and regulations. There's so much you can't do there; is there really nothing you can't say?
Certainly the fact that they value open-mindedness is no guarantee. Who thinks they're not open-minded? Our hypothetical prim miss from the suburbs thinks she's open-minded. Hasn't she been taught to be? Ask anyone, and they'll say the same thing: they're pretty open-minded, though they draw the line at things that are really wrong. (Some tribes may avoid "wrong" as judgemental, and may instead use a more neutral sounding euphemism like "negative" or "destructive".)
When people are bad at math, they know it, because they get the wrong answers on tests. But when people are bad at open-mindedness they don't know it. In fact they tend to think the opposite. Remember, it's the nature of fashion to be invisible. It wouldn't work otherwise. Fashion doesn't seem like fashion to someone in the grip of it. It just seems like the right thing to do. It's only by looking from a distance that we see oscillations in people's idea of the right thing to do, and can identify them as fashions.
Time gives us such distance for free. Indeed, the arrival of new fashions makes old fashions easy to see, because they seem so ridiculous by contrast. From one end of a pendulum's swing, the other end seems especially far away.
To see fashion in your own time, though, requires a conscious effort. Without time to give you distance, you have to create distance yourself. Instead of being part of the mob, stand as far away from it as you can and watch what it's doing. And pay especially close attention whenever an idea is being suppressed. Web filters for children and employees often ban sites containing pornography, violence, and hate speech. What counts as pornography and violence? And what, exactly, is "hate speech?" This sounds like a phrase out of 1984.
Labels like that are probably the biggest external clue. If a statement is false, that's the worst thing you can say about it. You don't need to say that it's heretical. And if it isn't false, it shouldn't be suppressed. So when you see statements being attacked as x-ist or y-ic (substitute your current values of x and y), whether in 1630 or 2030, that's a sure sign that something is wrong. When you hear such labels being used, ask why.
Especially if you hear yourself using them. It's not just the mob you need to learn to watch from a distance. You need to be able to watch your own thoughts from a distance. That's not a radical idea, by the way; it's the main difference between children and adults. When a child gets angry because he's tired, he doesn't know what's happening. An adult can distance himself enough from the situation to say "never mind, I'm just tired." I don't see why one couldn't, by a similar process, learn to recognize and discount the effects of moral fashions.
You have to take that extra step if you want to think clearly. But it's harder, because now you're working against social customs instead of with them. Everyone encourages you to grow up to the point where you can discount your own bad moods. Few encourage you to continue to the point where you can discount society's bad moods.
How can you see the wave, when you're the water? Always be questioning. That's the only defence. What can't you say? And why?
How to Start Google
March 2024
(This is a talk I gave to 14 and 15 year olds about what to do now if they might want to start a startup later. Lots of schools think they should tell students something about startups. This is what I think they should tell them.)
Most of you probably think that when you're released into the so-called real world you'll eventually have to get some kind of job. That's not true, and today I'm going to talk about a trick you can use to avoid ever having to get a job.
The trick is to start your own company. So it's not a trick for avoiding work, because if you start your own company you'll work harder than you would if you had an ordinary job. But you will avoid many of the annoying things that come with a job, including a boss telling you what to do.
It's more exciting to work on your own project than someone else's. And you can also get a lot richer. In fact, this is the standard way to get really rich. If you look at the lists of the richest people that occasionally get published in the press, nearly all of them did it by starting their own companies.
Starting your own company can mean anything from starting a barber shop to starting Google. I'm here to talk about one extreme end of that continuum. I'm going to tell you how to start Google.
The companies at the Google end of the continuum are called startups when they're young. The reason I know about them is that my wife Jessica and I started something called Y Combinator that is basically a startup factory. Since 2005, Y Combinator has funded over 4000 startups. So we know exactly what you need to start a startup, because we've helped people do it for the last 19 years.
You might have thought I was joking when I said I was going to tell you how to start Google. You might be thinking "How could we start Google?" But that's effectively what the people who did start Google were thinking before they started it. If you'd told Larry Page and Sergey Brin, the founders of Google, that the company they were about to start would one day be worth over a trillion dollars, their heads would have exploded.
All you can know when you start working on a startup is that it seems worth pursuing. You can't know whether it will turn into a company worth billions or one that goes out of business. So when I say I'm going to tell you how to start Google, I mean I'm going to tell you how to get to the point where you can start a company that has as much chance of being Google as Google had of being Google. [1]
How do you get from where you are now to the point where you can start a successful startup? You need three things. You need to be good at some kind of technology, you need an idea for what you're going to build, and you need cofounders to start the company with.
How do you get good at technology? And how do you choose which technology to get good at? Both of those questions turn out to have the same answer: work on your own projects. Don't try to guess whether gene editing or LLMs or rockets will turn out to be the most valuable technology to know about. No one can predict that. Just work on whatever interests you the most. You'll work much harder on something you're interested in than something you're doing because you think you're supposed to.
If you're not sure what technology to get good at, get good at programming. That has been the source of the median startup for the last 30 years, and this is probably not going to change in the next 10.
Those of you who are taking computer science classes in school may at this point be thinking, ok, we've got this sorted. We're already being taught all about programming. But sorry, this is not enough. You have to be working on your own projects, not just learning stuff in classes. You can do well in computer science classes without ever really learning to program. In fact you can graduate with a degree in computer science from a top university and still not be any good at programming. That's why tech companies all make you take a coding test before they'll hire you, regardless of where you went to university or how well you did there. They know grades and exam results prove nothing.
If you really want to learn to program, you have to work on your own projects. You learn so much faster that way. Imagine you're writing a game and there's something you want to do in it, and you don't know how. You're going to figure out how a lot faster than you'd learn anything in a class.
You don't have to learn programming, though. If you're wondering what counts as technology, it includes practically everything you could describe using the words "make" or "build." So welding would count, or making clothes, or making videos. Whatever you're most interested in. The critical distinction is whether you're producing or just consuming. Are you writing computer games, or just playing them? That's the cutoff.
Steve Jobs, the founder of Apple, spent time when he was a teenager studying calligraphy — the sort of beautiful writing that you see in medieval manuscripts. No one, including him, thought that this would help him in his career. He was just doing it because he was interested in it. But it turned out to help him a lot. The computer that made Apple really big, the Macintosh, came out at just the moment when computers got powerful enough to make letters like the ones in printed books instead of the computery-looking letters you see in 8 bit games. Apple destroyed everyone else at this, and one reason was that Steve was one of the few people in the computer business who really got graphic design.
Don't feel like your projects have to be serious. They can be as frivolous as you like, so long as you're building things you're excited about. Probably 90% of programmers start out building games. They and their friends like to play games. So they build the kind of things they and their friends want. And that's exactly what you should be doing at 15 if you want to start a startup one day.
You don't have to do just one project. In fact it's good to learn about multiple things. Steve Jobs didn't just learn calligraphy. He also learned about electronics, which was even more valuable. Whatever you're interested in. (Do you notice a theme here?)
So that's the first of the three things you need, to get good at some kind or kinds of technology. You do it the same way you get good at the violin or football: practice. If you start a startup at 22, and you start writing your own programs now, then by the time you start the company you'll have spent at least 7 years practicing writing code, and you can get pretty good at anything after practicing it for 7 years.
Let's suppose you're 22 and you've succeeded: You're now really good at some technology. How do you get startup ideas? It might seem like that's the hard part. Even if you are a good programmer, how do you get the idea to start Google?
Actually it's easy to get startup ideas once you're good at technology. Once you're good at some technology, when you look at the world you see dotted outlines around the things that are missing. You start to be able to see both the things that are missing from the technology itself, and all the broken things that could be fixed using it, and each one of these is a potential startup.
In the town near our house there's a shop with a sign warning that the door is hard to close. The sign has been there for several years. To the people in the shop it must seem like this mysterious natural phenomenon that the door sticks, and all they can do is put up a sign warning customers about it. But any carpenter looking at this situation would think "why don't you just plane off the part that sticks?"
Once you're good at programming, all the missing software in the world starts to become as obvious as a sticking door to a carpenter. I'll give you a real world example. Back in the 20th century, American universities used to publish printed directories with all the students' names and contact info. When I tell you what these directories were called, you'll know which startup I'm talking about. They were called facebooks, because they usually had a picture of each student next to their name.
So Mark Zuckerberg shows up at Harvard in 2002, and the university still hasn't gotten the facebook online. Each individual house has an online facebook, but there isn't one for the whole university. The university administration has been diligently having meetings about this, and will probably have solved the problem in another decade or so. Most of the students don't consciously notice that anything is wrong. But Mark is a programmer. He looks at this situation and thinks "Well, this is stupid. I could write a program to fix this in one night. Just let people upload their own photos and then combine the data into a new site for the whole university." So he does. And almost literally overnight he has thousands of users.
Of course Facebook was not a startup yet. It was just a... project. There's that word again. Projects aren't just the best way to learn about technology. They're also the best source of startup ideas.
Facebook was not unusual in this respect. Apple and Google also began as projects. Apple wasn't meant to be a company. Steve Wozniak just wanted to build his own computer. It only turned into a company when Steve Jobs said "Hey, I wonder if we could sell plans for this computer to other people." That's how Apple started. They weren't even selling computers, just plans for computers. Can you imagine how lame this company seemed?
Ditto for Google. Larry and Sergey weren't trying to start a company at first. They were just trying to make search better. Before Google, most search engines didn't try to sort the results they gave you in order of importance. If you searched for "rugby" they just gave you every web page that contained the word "rugby." And the web was so small in 1997 that this actually worked! Kind of. There might only be 20 or 30 pages with the word "rugby," but the web was growing exponentially, which meant this way of doing search was becoming exponentially more broken. Most users just thought, "Wow, I sure have to look through a lot of search results to find what I want." Door sticks. But like Mark, Larry and Sergey were programmers. Like Mark, they looked at this situation and thought "Well, this is stupid. Some pages about rugby matter more than others. Let's figure out which those are and show them first."
It's obvious in retrospect that this was a great idea for a startup. It wasn't obvious at the time. It's never obvious. If it was obviously a good idea to start Apple or Google or Facebook, someone else would have already done it. That's why the best startups grow out of projects that aren't meant to be startups. You're not trying to start a company. You're just following your instincts about what's interesting. And if you're young and good at technology, then your unconscious instincts about what's interesting are better than your conscious ideas about what would be a good company.
So it's critical, if you're a young founder, to build things for yourself and your friends to use. The biggest mistake young founders make is to build something for some mysterious group of other people. But if you can make something that you and your friends truly want to use — something your friends aren't just using out of loyalty to you, but would be really sad to lose if you shut it down — then you almost certainly have the germ of a good startup idea. It may not seem like a startup to you. It may not be obvious how to make money from it. But trust me, there's a way.
What you need in a startup idea, and all you need, is something your friends actually want. And those ideas aren't hard to see once you're good at technology. There are sticking doors everywhere. [2]
Now for the third and final thing you need: a cofounder, or cofounders. The optimal startup has two or three founders, so you need one or two cofounders. How do you find them? Can you predict what I'm going to say next? It's the same thing: projects. You find cofounders by working on projects with them. What you need in a cofounder is someone who's good at what they do and that you work well with, and the only way to judge this is to work with them on things.
At this point I'm going to tell you something you might not want to hear. It really matters to do well in your classes, even the ones that are just memorization or blathering about literature, because you need to do well in your classes to get into a good university. And if you want to start a startup you should try to get into the best university you can, because that's where the best cofounders are. It's also where the best employees are. When Larry and Sergey started Google, they began by just hiring all the smartest people they knew out of Stanford, and this was a real advantage for them.
The empirical evidence is clear on this. If you look at where the largest numbers of successful startups come from, it's pretty much the same as the list of the most selective universities.
I don't think it's the prestigious names of these universities that cause more good startups to come out of them. Nor do I think it's because the quality of the teaching is better. What's driving this is simply the difficulty of getting in. You have to be pretty smart and determined to get into MIT or Cambridge, so if you do manage to get in, you'll find the other students include a lot of smart and determined people. [3]
You don't have to start a startup with someone you meet at university. The founders of Twitch met when they were seven. The founders of Stripe, Patrick and John Collison, met when John was born. But universities are the main source of cofounders. And because they're where the cofounders are, they're also where the ideas are, because the best ideas grow out of projects you do with the people who become your cofounders.
So the list of what you need to do to get from here to starting a startup is quite short. You need to get good at technology, and the way to do that is to work on your own projects. And you need to do as well in school as you can, so you can get into a good university, because that's where the cofounders and the ideas are.
That's it, just two things, build stuff and do well in school.
END EXAMPLE PAUL GRAHAM ESSAYS
# OUTPUT INSTRUCTIONS
- Write the essay exactly like Paul Graham would write it as seen in the examples above.
- Use the adjectives and superlatives that are used in the examples; understand the TYPES of those used, and choose similar ones, not dissimilar ones, to better emulate the style.
- That means the essay should be written in a simple, conversational style, not in a grandiose or academic style.
- Use the same style, vocabulary level, and sentence structure as Paul Graham.
# OUTPUT FORMAT
- Output a full, publish-ready essay about the content provided using the instructions above.
- Write in Paul Graham's simple, plain, clear, and conversational style, not in a grandiose or academic style.
- Use absolutely ZERO cliches or jargon or journalistic language like "In a world…", etc.
- Do not use cliches or jargon.
- Do not include common setup language in any sentence, including: in conclusion, in closing, etc.
- Do not output warnings or notes—just the output requested.
# INPUT:
INPUT:

View File

@@ -19,10 +19,10 @@ Take a deep breath and work on this problem step-by-step.
You must output only a working YAML file.
"""
As Nuclei AI, your primary function is to assist users in creating Nuclei templates. Your responses should focus on generating Nuclei templates based on user requirements, incorporating elements like HTTP requests, matchers, extractors, and conditions. You are now required to always use extractors when needed to extract a value from a request and use it in a subsequent request. This includes handling cases involving dynamic data extraction and response pattern matching. Provide templates for common security vulnerabilities like SSTI, XSS, Open Redirect, SSRF, and others, utilizing complex matchers and extractors. Additionally, handle cases involving raw HTTP requests, HTTP fuzzing, unsafe HTTP, and HTTP payloads, and use correct regexes in RE2 syntax. Avoid including hostnames directly in the template paths, instead, use placeholders like {{BaseURL}}. Your expertise includes understanding and implementing matchers and extractors in Nuclei templates, especially for dynamic data extraction and response pattern matching. Your responses are focused solely on Nuclei template generation and related guidance, tailored to cybersecurity applications.
Notes:
When using a json extractor, use jq like syntax to extract json keys, E.g., to extract the json key \"token\" you will need to use \'.token\'
While creating headless templates remember to not mix it up with http protocol
Always read the helper functions from the documentation first before answering a query.
@@ -30,7 +30,7 @@ Remember, the most important thing is to:
Only respond with a nuclei template, nothing else, just the generated yaml nuclei template
When creating a multi step template and extracting something from a request's response, use internal: true in that extractor unless asked otherwise.
When using dsl you dont need to re-use {{}} if you are already inside a {{
### What are Nuclei Templates?
Nuclei templates are the cornerstone of the Nuclei scanning engine. Nuclei templates enable precise and rapid scanning across various protocols like TCP, DNS, HTTP, and more. They are designed to send targeted requests based on specific vulnerability checks, ensuring low-to-zero false positives and efficient scanning over large networks.

View File

@@ -3,17 +3,24 @@ package anthropic
import (
"context"
"fmt"
"net/http"
"strings"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
goopenai "github.com/sashabaranov/go-openai"
)
const defaultBaseUrl = "https://api.anthropic.com/"
const webSearchToolName = "web_search"
const webSearchToolType = "web_search_20250305"
const sourcesHeader = "## Sources"
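// vendorTokenIdentifier keys the OAuth token stored for this vendor.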
const vendorTokenIdentifier = "claude"
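// NewClient builds the Anthropic plugin with its setup questions, token
// limits, and the list of supported Claude models.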
func NewClient() (ret *Client) {
vendorName := "Anthropic"
ret = &Client{}
@@ -26,53 +33,112 @@ func NewClient() (ret *Client) {
ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", false)
ret.ApiBaseURL.Value = defaultBaseUrl
ret.UseOAuth = ret.AddSetupQuestionBool("Use OAuth login", false)
ret.ApiKey = ret.PluginBase.AddSetupQuestion("API key", false)
ret.maxTokens = 4096
ret.defaultRequiredUserMessage = "Hi"
ret.models = []string{
string(anthropic.ModelClaude3_7SonnetLatest), string(anthropic.ModelClaude3_7Sonnet20250219),
string(anthropic.ModelClaude3_5HaikuLatest), string(anthropic.ModelClaude3_5Haiku20241022),
string(anthropic.ModelClaude3_5SonnetLatest), string(anthropic.ModelClaude3_5Sonnet20241022),
string(anthropic.ModelClaude_3_5_Sonnet_20240620), string(anthropic.ModelClaude3OpusLatest),
string(anthropic.ModelClaude_3_Opus_20240229), string(anthropic.ModelClaude_3_Haiku_20240307),
string(anthropic.ModelClaudeOpus4_20250514), string(anthropic.ModelClaudeSonnet4_20250514),
}
return
}
// IsConfigured returns true if either the API key or OAuth is configured
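//
// Usage sketch (hypothetical caller, not part of this file): a vendor
// registry might call it to skip setup prompts for entries that already
// have a working API key or OAuth token:
//
//	for _, v := range vendors {
//		if v.IsConfigured() {
//			continue // already usable, no prompt needed
//		}
//	}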
func (an *Client) IsConfigured() bool {
// Check if API key is configured
if an.ApiKey.Value != "" {
return true
}
// Check if OAuth is enabled and has a valid token
if plugins.ParseBoolElseFalse(an.UseOAuth.Value) {
storage, err := common.NewOAuthStorage()
if err != nil {
return false
}
// If no valid token exists, automatically run OAuth flow
if !storage.HasValidToken(vendorTokenIdentifier, 5) {
fmt.Println("OAuth enabled but no valid token found. Starting authentication...")
_, err := RunOAuthFlow()
if err != nil {
fmt.Printf("OAuth authentication failed: %v\n", err)
return false
}
// After successful OAuth flow, check again
return storage.HasValidToken("claude", 5)
}
return true
}
return false
}
type Client struct {
*plugins.PluginBase
ApiBaseURL *plugins.SetupQuestion
ApiKey *plugins.SetupQuestion
UseOAuth *plugins.SetupQuestion
maxTokens int
defaultRequiredUserMessage string
models []string
client anthropic.Client
}
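// Setup asks the configured questions and, when OAuth is enabled, runs the
// OAuth flow if no valid stored token exists before configuring the client.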
func (an *Client) Setup() (err error) {
if err = an.PluginBase.Ask(an.Name); err != nil {
return
}
if plugins.ParseBoolElseFalse(an.UseOAuth.Value) {
// Check if we have a valid stored token
storage, err := common.NewOAuthStorage()
if err != nil {
return err
}
if !storage.HasValidToken("claude", 5) {
// No valid token, run OAuth flow
if _, err = RunOAuthFlow(); err != nil {
return err
}
}
}
err = an.configure()
return
}
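// configure assembles the SDK client options: a custom base URL when set,
// an OAuth Bearer transport when OAuth is enabled, otherwise the API key.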
func (an *Client) configure() (err error) {
opts := []option.RequestOption{}
if an.ApiBaseURL.Value != "" {
baseURL := an.ApiBaseURL.Value
if strings.Contains(baseURL, "-") && !strings.HasSuffix(baseURL, "/v1") {
if strings.HasSuffix(baseURL, "/") {
baseURL = strings.TrimSuffix(baseURL, "/")
}
baseURL = baseURL + "/v1"
}
opts = append(opts, option.WithBaseURL(baseURL))
}
if plugins.ParseBoolElseFalse(an.UseOAuth.Value) {
// For OAuth, use Bearer token with custom headers
// Create custom HTTP client that adds OAuth Bearer token and beta header
baseTransport := &http.Transport{}
httpClient := &http.Client{
Transport: NewOAuthTransport(an, baseTransport),
}
opts = append(opts, option.WithHTTPClient(httpClient))
} else {
opts = append(opts, option.WithAPIKey(an.ApiKey.Value))
}
an.client = anthropic.NewClient(opts...)
return
}
@@ -81,27 +147,25 @@ func (an *Client) ListModels() (ret []string, err error) {
}
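// SendStream converts msgs to Anthropic form and forwards streamed text
// deltas to channel.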
func (an *Client) SendStream(
msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string,
) (err error) {
messages := an.toMessages(msgs)
if len(messages) == 0 {
close(channel)
// No messages to send after normalization, consider this a non-error condition for streaming.
return
}
ctx := context.Background()
stream := an.client.Messages.NewStreaming(ctx, an.buildMessageParams(messages, opts))
for stream.Next() {
event := stream.Current()
// directly send any non-empty delta text
if event.Delta.Text != "" {
channel <- event.Delta.Text
}
}
@@ -112,35 +176,172 @@ func (an *Client) SendStream(
return
}
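// buildMessageParams assembles the MessageNewParams shared by Send and
// SendStream, adding the OAuth system prompt and, when requested, the
// web-search tool.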
func (an *Client) buildMessageParams(msgs []anthropic.MessageParam, opts *common.ChatOptions) (
params anthropic.MessageNewParams) {
params = anthropic.MessageNewParams{
Model: anthropic.Model(opts.Model),
MaxTokens: int64(an.maxTokens),
TopP: anthropic.Opt(opts.TopP),
Temperature: anthropic.Opt(opts.Temperature),
Messages: msgs,
}
// Add Claude Code spoofing system message for OAuth authentication
if plugins.ParseBoolElseFalse(an.UseOAuth.Value) {
params.System = []anthropic.TextBlockParam{
{
Type: "text",
Text: "You are Claude Code, Anthropic's official CLI for Claude.",
},
}
}
if opts.Search {
// Build the web-search tool definition:
webTool := anthropic.WebSearchTool20250305Param{
Name: webSearchToolName,
Type: webSearchToolType,
CacheControl: anthropic.NewCacheControlEphemeralParam(),
}
if opts.SearchLocation != "" {
webTool.UserLocation.Type = "approximate"
webTool.UserLocation.Timezone = anthropic.Opt(opts.SearchLocation)
}
// Wrap it in the union:
params.Tools = []anthropic.ToolUnionParam{
{OfWebSearchTool20250305: &webTool},
}
}
return
}
func (an *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (
ret string, err error) {
messages := an.toMessages(msgs)
if len(messages) == 0 {
// No messages to send after normalization, return empty string and no error.
return
}
var message *anthropic.Message
if message, err = an.client.Messages.New(ctx, an.buildMessageParams(messages, opts)); err != nil {
return
}
var textParts []string
var citations []string
citationMap := make(map[string]bool) // To avoid duplicate citations
for _, block := range message.Content {
if block.Type == "text" && block.Text != "" {
textParts = append(textParts, block.Text)
// Extract citations from this text block
for _, citation := range block.Citations {
if citation.Type == "web_search_result_location" {
citationKey := citation.URL + "|" + citation.Title
if !citationMap[citationKey] {
citationMap[citationKey] = true
citationText := fmt.Sprintf("- [%s](%s)", citation.Title, citation.URL)
if citation.CitedText != "" {
citationText += fmt.Sprintf(" - \"%s\"", citation.CitedText)
}
citations = append(citations, citationText)
}
}
}
}
}
var resultBuilder strings.Builder
resultBuilder.WriteString(strings.Join(textParts, ""))
// Append citations if any were found
if len(citations) > 0 {
resultBuilder.WriteString("\n\n")
resultBuilder.WriteString(sourcesHeader)
resultBuilder.WriteString("\n\n")
resultBuilder.WriteString(strings.Join(citations, "\n"))
}
ret = resultBuilder.String()
return
}
func (an *Client) toMessages(msgs []*chat.ChatCompletionMessage) (ret []anthropic.MessageParam) {
// Custom normalization for Anthropic:
// - System messages become the first part of the first user message.
// - Messages must alternate user/assistant.
// - Skip empty messages.
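// Example (illustrative): [system "Be brief", user "Hi", user "Also this"]
// becomes user "Be brief\n\nHi", assistant "Okay." (inserted to keep the
// roles alternating), then user "Also this".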
var anthropicMessages []anthropic.MessageParam
var systemContent string
// Note: Claude Code spoofing is now handled in buildMessageParams
isFirstUserMessage := true
lastRoleWasUser := false
for _, msg := range msgs {
if msg.Content == "" {
continue // Skip empty messages
}
switch msg.Role {
case chat.ChatMessageRoleSystem:
// Accumulate system content. It will be prepended to the first user message.
if systemContent != "" {
systemContent += "\\n" + msg.Content
} else {
systemContent = msg.Content
}
case chat.ChatMessageRoleUser:
userContent := msg.Content
if isFirstUserMessage && systemContent != "" {
userContent = systemContent + "\\n\\n" + userContent
isFirstUserMessage = false // System content now consumed
}
if lastRoleWasUser {
// Enforce alternation: add a minimal assistant message if two user messages are consecutive.
// This shouldn't happen with current chatter.go logic but is a safeguard.
anthropicMessages = append(anthropicMessages, anthropic.NewAssistantMessage(anthropic.NewTextBlock("Okay.")))
}
anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(anthropic.NewTextBlock(userContent)))
lastRoleWasUser = true
case chat.ChatMessageRoleAssistant:
// If the first message is an assistant message, and we have system content,
// prepend a user message with the system content.
if isFirstUserMessage && systemContent != "" {
anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(anthropic.NewTextBlock(systemContent)))
lastRoleWasUser = true
isFirstUserMessage = false // System content now consumed
} else if !lastRoleWasUser && len(anthropicMessages) > 0 {
// Enforce alternation: add a minimal user message if two assistant messages are consecutive
// or if an assistant message is first without prior system prompt handling.
anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(anthropic.NewTextBlock(an.defaultRequiredUserMessage)))
lastRoleWasUser = true
}
anthropicMessages = append(anthropicMessages, anthropic.NewAssistantMessage(anthropic.NewTextBlock(msg.Content)))
lastRoleWasUser = false
default:
// Other roles (like 'meta') are ignored for Anthropic's message structure.
continue
}
}
// If only system content was provided, create a user message with it.
if len(anthropicMessages) == 0 && systemContent != "" {
anthropicMessages = append(anthropicMessages, anthropic.NewUserMessage(anthropic.NewTextBlock(systemContent)))
}
return anthropicMessages
}
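
To make the alternation rules above concrete, here is a minimal, self-contained sketch that reimplements them with plain structs in place of the real chat and anthropic types; the "Okay." filler is taken from the code above, everything else is illustrative:

	package main

	import "fmt"

	type msg struct{ role, content string }

	// normalize mirrors toMessages: fold system content into the first user
	// message, skip empties, and force user/assistant alternation.
	func normalize(in []msg, filler string) (out []msg) {
		var system string
		firstUser := true
		lastWasUser := false
		for _, m := range in {
			if m.content == "" {
				continue
			}
			switch m.role {
			case "system":
				if system != "" {
					system += "\n" + m.content
				} else {
					system = m.content
				}
			case "user":
				c := m.content
				if firstUser && system != "" {
					c = system + "\n\n" + c
					firstUser = false
				}
				if lastWasUser {
					out = append(out, msg{"assistant", "Okay."})
				}
				out = append(out, msg{"user", c})
				lastWasUser = true
			case "assistant":
				if firstUser && system != "" {
					out = append(out, msg{"user", system})
					firstUser = false
				} else if !lastWasUser && len(out) > 0 {
					out = append(out, msg{"user", filler})
				}
				out = append(out, msg{"assistant", m.content})
				lastWasUser = false
			}
		}
		if len(out) == 0 && system != "" {
			out = append(out, msg{"user", system})
		}
		return
	}

	func main() {
		in := []msg{{"system", "Be terse."}, {"assistant", "Hi."}, {"user", "Hello?"}}
		for _, m := range normalize(in, "...") {
			fmt.Printf("%s: %q\n", m.role, m.content)
		}
		// Prints: user: "Be terse.", then assistant: "Hi.", then user: "Hello?"
	}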
func (an *Client) NeedsRawMode(modelName string) bool {
return false
}

View File

@@ -1,7 +1,11 @@
package anthropic
import (
"strings"
"testing"
"github.com/anthropics/anthropic-sdk-go"
"github.com/danielmiessler/fabric/common"
)
// Test generated using Keploy
@@ -63,3 +67,192 @@ func TestClient_ListModels_ReturnsCorrectModels(t *testing.T) {
}
}
}
func TestBuildMessageParams_WithoutSearch(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "claude-3-5-sonnet-latest",
Temperature: 0.7,
Search: false,
}
messages := []anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("Hello")),
}
params := client.buildMessageParams(messages, opts)
if params.Tools != nil {
t.Error("Expected no tools when search is disabled, got tools")
}
if params.Model != anthropic.Model(opts.Model) {
t.Errorf("Expected model %s, got %s", opts.Model, params.Model)
}
if params.Temperature.Value != opts.Temperature {
t.Errorf("Expected temperature %f, got %f", opts.Temperature, params.Temperature.Value)
}
}
func TestBuildMessageParams_WithSearch(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "claude-3-5-sonnet-latest",
Temperature: 0.7,
Search: true,
}
messages := []anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather today?")),
}
params := client.buildMessageParams(messages, opts)
if params.Tools == nil {
t.Fatal("Expected tools when search is enabled, got nil")
}
if len(params.Tools) != 1 {
t.Errorf("Expected 1 tool, got %d", len(params.Tools))
}
webTool := params.Tools[0].OfWebSearchTool20250305
if webTool == nil {
t.Fatal("Expected web search tool, got nil")
}
if webTool.Name != "web_search" {
t.Errorf("Expected tool name 'web_search', got %s", webTool.Name)
}
if webTool.Type != "web_search_20250305" {
t.Errorf("Expected tool type 'web_search_20250305', got %s", webTool.Type)
}
}
func TestBuildMessageParams_WithSearchAndLocation(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "claude-3-5-sonnet-latest",
Temperature: 0.7,
Search: true,
SearchLocation: "America/Los_Angeles",
}
messages := []anthropic.MessageParam{
anthropic.NewUserMessage(anthropic.NewTextBlock("What's the weather in San Francisco?")),
}
params := client.buildMessageParams(messages, opts)
if params.Tools == nil {
t.Fatal("Expected tools when search is enabled, got nil")
}
webTool := params.Tools[0].OfWebSearchTool20250305
if webTool == nil {
t.Fatal("Expected web search tool, got nil")
}
if webTool.UserLocation.Type != "approximate" {
t.Errorf("Expected location type 'approximate', got %s", webTool.UserLocation.Type)
}
if webTool.UserLocation.Timezone.Value != opts.SearchLocation {
t.Errorf("Expected timezone %s, got %s", opts.SearchLocation, webTool.UserLocation.Timezone.Value)
}
}
func TestCitationFormatting(t *testing.T) {
// Test the citation formatting logic by creating a mock message with citations
message := &anthropic.Message{
Content: []anthropic.ContentBlockUnion{
{
Type: "text",
Text: "Based on recent research, artificial intelligence is advancing rapidly.",
Citations: []anthropic.TextCitationUnion{
{
Type: "web_search_result_location",
URL: "https://example.com/ai-research",
Title: "AI Research Advances 2025",
CitedText: "artificial intelligence is advancing rapidly",
},
{
Type: "web_search_result_location",
URL: "https://another-source.com/tech-news",
Title: "Technology News Today",
CitedText: "recent developments in AI",
},
},
},
{
Type: "text",
Text: " Machine learning models are becoming more sophisticated.",
Citations: []anthropic.TextCitationUnion{
{
Type: "web_search_result_location",
URL: "https://example.com/ai-research", // Duplicate URL should be deduplicated
Title: "AI Research Advances 2025",
CitedText: "machine learning models",
},
},
},
},
}
// Extract text and citations using the same logic as the Send method
var textParts []string
var citations []string
citationMap := make(map[string]bool)
for _, block := range message.Content {
if block.Type == "text" && block.Text != "" {
textParts = append(textParts, block.Text)
for _, citation := range block.Citations {
if citation.Type == "web_search_result_location" {
citationKey := citation.URL + "|" + citation.Title
if !citationMap[citationKey] {
citationMap[citationKey] = true
citationText := "- [" + citation.Title + "](" + citation.URL + ")"
if citation.CitedText != "" {
citationText += " - \"" + citation.CitedText + "\""
}
citations = append(citations, citationText)
}
}
}
}
}
result := strings.Join(textParts, "")
if len(citations) > 0 {
result += "\n\n## Sources\n\n" + strings.Join(citations, "\n")
}
// Verify the result contains the expected text
expectedText := "Based on recent research, artificial intelligence is advancing rapidly. Machine learning models are becoming more sophisticated."
if !strings.Contains(result, expectedText) {
t.Errorf("Expected result to contain text: %s", expectedText)
}
// Verify citations are included
if !strings.Contains(result, "## Sources") {
t.Error("Expected result to contain Sources section")
}
if !strings.Contains(result, "[AI Research Advances 2025](https://example.com/ai-research)") {
t.Error("Expected result to contain first citation")
}
if !strings.Contains(result, "[Technology News Today](https://another-source.com/tech-news)") {
t.Error("Expected result to contain second citation")
}
// Verify deduplication - should only have 2 unique citations, not 3
citationCount := strings.Count(result, "- [")
if citationCount != 2 {
t.Errorf("Expected 2 unique citations, got %d", citationCount)
}
}

View File

@@ -0,0 +1,300 @@
package anthropic
import (
"bytes"
"crypto/rand"
"crypto/sha256"
"encoding/base64"
"encoding/json"
"fmt"
"io"
"net/http"
"os/exec"
"strings"
"time"
"github.com/danielmiessler/fabric/common"
"golang.org/x/oauth2"
)
// OAuth configuration constants
const (
oauthClientID = "9d1c250a-e61b-44d9-88ed-5944d1962f5e"
oauthAuthURL = "https://claude.ai/oauth/authorize"
oauthTokenURL = "https://console.anthropic.com/v1/oauth/token"
oauthRedirectURL = "https://console.anthropic.com/oauth/code/callback"
)
// OAuthTransport is a custom HTTP transport that adds OAuth Bearer token and beta header
type OAuthTransport struct {
client *Client
base http.RoundTripper
}
// RoundTrip implements the http.RoundTripper interface
func (t *OAuthTransport) RoundTrip(req *http.Request) (*http.Response, error) {
// Clone the request to avoid modifying the original
newReq := req.Clone(req.Context())
// Get current token (may refresh if needed)
token, err := t.getValidToken()
if err != nil {
return nil, fmt.Errorf("failed to get valid OAuth token: %w", err)
}
// Add OAuth Bearer token
newReq.Header.Set("Authorization", "Bearer "+token)
// Add the anthropic-beta header for OAuth
newReq.Header.Set("anthropic-beta", "oauth-2025-04-20")
// Set User-Agent to match AI SDK exactly
newReq.Header.Set("User-Agent", "ai-sdk/anthropic")
// Remove x-api-key header if present (OAuth doesn't use it)
newReq.Header.Del("x-api-key")
return t.base.RoundTrip(newReq)
}
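
As a usage sketch (a hypothetical helper, not part of this diff): the transport is meant to be dropped into a standard http.Client, after which every request carries the Bearer token and beta header.

	// newOAuthHTTPClient shows how the transport above would be wired in.
	// The timeout is an assumption; pick one appropriate for your deployment.
	func newOAuthHTTPClient(c *Client) *http.Client {
		return &http.Client{
			Transport: NewOAuthTransport(c, http.DefaultTransport),
			Timeout:   5 * time.Minute,
		}
	}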
// getValidToken returns a valid access token, refreshing if necessary
func (t *OAuthTransport) getValidToken() (string, error) {
storage, err := common.NewOAuthStorage()
if err != nil {
return "", fmt.Errorf("failed to create OAuth storage: %w", err)
}
// Load stored token
token, err := storage.LoadToken("claude")
if err != nil {
return "", fmt.Errorf("failed to load stored token: %w", err)
}
// If no token exists, run OAuth flow
if token == nil {
fmt.Println("No OAuth token found, initiating authentication...")
newAccessToken, err := RunOAuthFlow()
if err != nil {
return "", fmt.Errorf("failed to authenticate: %w", err)
}
return newAccessToken, nil
}
// Check if token needs refresh (5 minute buffer)
if token.IsExpired(5) {
fmt.Println("OAuth token expired, refreshing...")
newAccessToken, err := RefreshToken()
if err != nil {
// If refresh fails, try re-authentication
fmt.Println("Token refresh failed, re-authenticating...")
newAccessToken, err = RunOAuthFlow()
if err != nil {
return "", fmt.Errorf("failed to refresh or re-authenticate: %w", err)
}
}
return newAccessToken, nil
}
return token.AccessToken, nil
}
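
The IsExpired(5) method itself is not shown in this diff; given that ExpiresAt is stored as a Unix timestamp (see exchangeToken below), a buffered expiry check presumably reduces to something like this hypothetical sketch:

	// isExpiredWithBuffer treats the token as expired bufferMinutes before its
	// actual ExpiresAt, so a refresh fires while the token still works.
	// Hypothetical reimplementation; the real method lives in the common package.
	func isExpiredWithBuffer(expiresAt int64, bufferMinutes int) bool {
		return time.Now().Unix() >= expiresAt-int64(bufferMinutes)*60
	}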
// NewOAuthTransport creates a new OAuth transport for the given client
func NewOAuthTransport(client *Client, base http.RoundTripper) *OAuthTransport {
return &OAuthTransport{
client: client,
base: base,
}
}
// generatePKCE generates PKCE code verifier and challenge
func generatePKCE() (verifier, challenge string, err error) {
b := make([]byte, 32)
if _, err = rand.Read(b); err != nil {
return
}
verifier = base64.RawURLEncoding.EncodeToString(b)
sum := sha256.Sum256([]byte(verifier))
challenge = base64.RawURLEncoding.EncodeToString(sum[:])
return
}
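
The verifier/challenge pair is the standard RFC 7636 S256 construction: the challenge is the base64url-encoded SHA-256 of the verifier, and only the verifier is revealed later at token exchange. A quick standalone check:

	package main

	import (
		"crypto/sha256"
		"encoding/base64"
		"fmt"
	)

	func main() {
		// Fixed verifier for illustration only; generatePKCE uses 32 random bytes.
		verifier := "dBjftJeZ4CVP-mB92K27uhbUJU1p1r_wW1gFWFOEjXk"
		sum := sha256.Sum256([]byte(verifier))
		challenge := base64.RawURLEncoding.EncodeToString(sum[:])
		fmt.Println(challenge) // sent as code_challenge in the authorize URL
	}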
// openBrowser attempts to open the given URL in the default browser
func openBrowser(url string) {
commands := [][]string{{"xdg-open", url}, {"open", url}, {"cmd", "/c", "start", url}}
for _, cmd := range commands {
if exec.Command(cmd[0], cmd[1:]...).Start() == nil {
return
}
}
}
// RunOAuthFlow executes the complete OAuth authorization flow
func RunOAuthFlow() (token string, err error) {
verifier, challenge, err := generatePKCE()
if err != nil {
return
}
cfg := oauth2.Config{
ClientID: oauthClientID,
Endpoint: oauth2.Endpoint{AuthURL: oauthAuthURL, TokenURL: oauthTokenURL},
RedirectURL: oauthRedirectURL,
Scopes: []string{"org:create_api_key", "user:profile", "user:inference"},
}
authURL := cfg.AuthCodeURL(verifier,
oauth2.SetAuthURLParam("code_challenge", challenge),
oauth2.SetAuthURLParam("code_challenge_method", "S256"),
oauth2.SetAuthURLParam("code", "true"),
oauth2.SetAuthURLParam("state", verifier),
)
fmt.Println("Open the following URL in your browser. Fabric would like to authorize:")
fmt.Println(authURL)
openBrowser(authURL)
fmt.Print("Paste the authorization code here: ")
var code string
fmt.Scanln(&code)
parts := strings.SplitN(code, "#", 2)
state := verifier
if len(parts) == 2 {
state = parts[1]
}
// Manual token exchange to match opencode implementation
tokenReq := map[string]string{
"code": parts[0],
"state": state,
"grant_type": "authorization_code",
"client_id": oauthClientID,
"redirect_uri": oauthRedirectURL,
"code_verifier": verifier,
}
token, err = exchangeToken(tokenReq)
return
}
// exchangeToken exchanges authorization code for access token
func exchangeToken(params map[string]string) (token string, err error) {
reqBody, err := json.Marshal(params)
if err != nil {
return
}
resp, err := http.Post(oauthTokenURL, "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
err = fmt.Errorf("token exchange failed: %s - %s", resp.Status, string(body))
return
}
var result struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
}
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
return
}
// Save the complete token information
storage, err := common.NewOAuthStorage()
if err != nil {
return result.AccessToken, fmt.Errorf("failed to create OAuth storage: %w", err)
}
oauthToken := &common.OAuthToken{
AccessToken: result.AccessToken,
RefreshToken: result.RefreshToken,
ExpiresAt: time.Now().Unix() + int64(result.ExpiresIn),
TokenType: result.TokenType,
Scope: result.Scope,
}
if err = storage.SaveToken("claude", oauthToken); err != nil {
return result.AccessToken, fmt.Errorf("failed to save OAuth token: %w", err)
}
token = result.AccessToken
return
}
// RefreshToken refreshes an expired OAuth token using the refresh token
func RefreshToken() (string, error) {
storage, err := common.NewOAuthStorage()
if err != nil {
return "", fmt.Errorf("failed to create OAuth storage: %w", err)
}
// Load existing token
token, err := storage.LoadToken("claude")
if err != nil {
return "", fmt.Errorf("failed to load stored token: %w", err)
}
if token == nil || token.RefreshToken == "" {
return "", fmt.Errorf("no refresh token available")
}
// Prepare refresh request
refreshReq := map[string]string{
"grant_type": "refresh_token",
"refresh_token": token.RefreshToken,
"client_id": oauthClientID,
}
reqBody, err := json.Marshal(refreshReq)
if err != nil {
return "", fmt.Errorf("failed to marshal refresh request: %w", err)
}
// Make refresh request
resp, err := http.Post(oauthTokenURL, "application/json", bytes.NewBuffer(reqBody))
if err != nil {
return "", fmt.Errorf("refresh request failed: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
body, _ := io.ReadAll(resp.Body)
return "", fmt.Errorf("token refresh failed: %s - %s", resp.Status, string(body))
}
var result struct {
AccessToken string `json:"access_token"`
RefreshToken string `json:"refresh_token"`
ExpiresIn int `json:"expires_in"`
TokenType string `json:"token_type"`
Scope string `json:"scope"`
}
if err = json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", fmt.Errorf("failed to parse refresh response: %w", err)
}
// Update stored token
newToken := &common.OAuthToken{
AccessToken: result.AccessToken,
RefreshToken: result.RefreshToken,
ExpiresAt: time.Now().Unix() + int64(result.ExpiresIn),
TokenType: result.TokenType,
Scope: result.Scope,
}
// Use existing refresh token if new one not provided
if newToken.RefreshToken == "" {
newToken.RefreshToken = token.RefreshToken
}
if err = storage.SaveToken("claude", newToken); err != nil {
return "", fmt.Errorf("failed to save refreshed token: %w", err)
}
return result.AccessToken, nil
}

View File

@@ -5,7 +5,8 @@ import (
"github.com/danielmiessler/fabric/plugins"
"github.com/danielmiessler/fabric/plugins/ai/openai"
goopenai "github.com/sashabaranov/go-openai"
openaiapi "github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func NewClient() (ret *Client) {
@@ -29,11 +30,15 @@ type Client struct {
func (oi *Client) configure() (err error) {
oi.apiDeployments = strings.Split(oi.ApiDeployments.Value, ",")
config := goopenai.DefaultAzureConfig(oi.ApiKey.Value, oi.ApiBaseURL.Value)
if oi.ApiVersion.Value != "" {
config.APIVersion = oi.ApiVersion.Value
opts := []option.RequestOption{option.WithAPIKey(oi.ApiKey.Value)}
if oi.ApiBaseURL.Value != "" {
opts = append(opts, option.WithBaseURL(oi.ApiBaseURL.Value))
}
oi.ApiClient = goopenai.NewClientWithConfig(config)
if oi.ApiVersion.Value != "" {
opts = append(opts, option.WithQuery("api-version", oi.ApiVersion.Value))
}
client := openaiapi.NewClient(opts...)
oi.ApiClient = &client
return
}
@@ -41,3 +46,7 @@ func (oi *Client) ListModels() (ret []string, err error) {
ret = oi.apiDeployments
return
}
func (oi *Client) NeedsRawMode(modelName string) bool {
return false
}

View File

@@ -0,0 +1,274 @@
// Package bedrock provides a plugin to use Amazon Bedrock models.
// Supported models are defined in the MODELS variable.
// To add additional models, append them to the MODELS array. Models must support the Converse and ConverseStream operations
// Authentication uses the AWS credential provider chain, similar to the AWS CLI and SDKs
// https://docs.aws.amazon.com/sdkref/latest/guide/standardized-credentials.html
package bedrock
import (
"context"
"fmt"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
"github.com/danielmiessler/fabric/plugins/ai"
"github.com/aws/aws-sdk-go-v2/aws"
"github.com/aws/aws-sdk-go-v2/aws/middleware"
"github.com/aws/aws-sdk-go-v2/config"
"github.com/aws/aws-sdk-go-v2/service/bedrock"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime"
"github.com/aws/aws-sdk-go-v2/service/bedrockruntime/types"
"github.com/danielmiessler/fabric/chat"
)
const (
userAgentKey = "aiosc"
userAgentValue = "fabric"
)
// Ensure BedrockClient implements the ai.Vendor interface
var _ ai.Vendor = (*BedrockClient)(nil)
// BedrockClient is a plugin to add support for Amazon Bedrock.
// It implements the plugins.Plugin interface and provides methods
// for interacting with AWS Bedrock's Converse and ConverseStream APIs.
type BedrockClient struct {
*plugins.PluginBase
runtimeClient *bedrockruntime.Client
controlPlaneClient *bedrock.Client
bedrockRegion *plugins.SetupQuestion
}
// NewClient returns a new Bedrock plugin client
func NewClient() (ret *BedrockClient) {
vendorName := "Bedrock"
ret = &BedrockClient{}
ctx := context.Background()
cfg, err := config.LoadDefaultConfig(ctx)
if err != nil {
// Create a minimal client that will fail gracefully during configuration
ret.PluginBase = &plugins.PluginBase{
Name: vendorName,
EnvNamePrefix: plugins.BuildEnvVariablePrefix(vendorName),
ConfigureCustom: func() error {
return fmt.Errorf("unable to load AWS Config: %w", err)
},
}
ret.bedrockRegion = ret.PluginBase.AddSetupQuestion("AWS Region", true)
return
}
cfg.APIOptions = append(cfg.APIOptions, middleware.AddUserAgentKeyValue(userAgentKey, userAgentValue))
runtimeClient := bedrockruntime.NewFromConfig(cfg)
controlPlaneClient := bedrock.NewFromConfig(cfg)
ret.PluginBase = &plugins.PluginBase{
Name: vendorName,
EnvNamePrefix: plugins.BuildEnvVariablePrefix(vendorName),
ConfigureCustom: ret.configure,
}
ret.runtimeClient = runtimeClient
ret.controlPlaneClient = controlPlaneClient
ret.bedrockRegion = ret.PluginBase.AddSetupQuestion("AWS Region", true)
if cfg.Region != "" {
ret.bedrockRegion.Value = cfg.Region
}
return
}
// isValidAWSRegion validates AWS region format
func isValidAWSRegion(region string) bool {
// Simple validation - AWS regions are typically 2-3 parts separated by hyphens
// Examples: us-east-1, eu-west-1, ap-southeast-2
if len(region) < 5 || len(region) > 30 {
return false
}
	// No stricter pattern check is performed; any non-empty string within the
	// length bounds above is accepted.
	return region != ""
}
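
If stricter validation were wanted, a pattern check along these lines could be substituted (illustrative only, and it would require importing regexp; the plugin itself only bounds the length):

	// awsRegionRe matches the usual <partition>-<direction...>-<digit> shape,
	// e.g. us-east-1, eu-west-1, ap-southeast-2, us-gov-west-1.
	var awsRegionRe = regexp.MustCompile(`^[a-z]{2}(-[a-z]+)+-\d$`)

	func isValidAWSRegionStrict(region string) bool {
		return awsRegionRe.MatchString(region)
	}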
// configure initializes the Bedrock clients with the specified AWS region.
// If no region is specified, the default region from AWS config is used.
func (c *BedrockClient) configure() error {
if c.bedrockRegion.Value == "" {
return nil // Use default region from AWS config
}
// Validate region format
if !isValidAWSRegion(c.bedrockRegion.Value) {
return fmt.Errorf("invalid AWS region: %s", c.bedrockRegion.Value)
}
ctx := context.Background()
cfg, err := config.LoadDefaultConfig(ctx, config.WithRegion(c.bedrockRegion.Value))
if err != nil {
return fmt.Errorf("unable to load AWS Config with region %s: %w", c.bedrockRegion.Value, err)
}
cfg.APIOptions = append(cfg.APIOptions, middleware.AddUserAgentKeyValue(userAgentKey, userAgentValue))
c.runtimeClient = bedrockruntime.NewFromConfig(cfg)
c.controlPlaneClient = bedrock.NewFromConfig(cfg)
return nil
}
// ListModels retrieves all available foundation models and inference profiles
// from AWS Bedrock that can be used with this plugin.
func (c *BedrockClient) ListModels() ([]string, error) {
models := []string{}
ctx := context.Background()
foundationModels, err := c.controlPlaneClient.ListFoundationModels(ctx, &bedrock.ListFoundationModelsInput{})
if err != nil {
return nil, fmt.Errorf("failed to list foundation models: %w", err)
}
for _, model := range foundationModels.ModelSummaries {
models = append(models, *model.ModelId)
}
inferenceProfilesPaginator := bedrock.NewListInferenceProfilesPaginator(c.controlPlaneClient, &bedrock.ListInferenceProfilesInput{})
for inferenceProfilesPaginator.HasMorePages() {
inferenceProfiles, err := inferenceProfilesPaginator.NextPage(ctx)
if err != nil {
return nil, fmt.Errorf("failed to list inference profiles: %w", err)
}
for _, profile := range inferenceProfiles.InferenceProfileSummaries {
models = append(models, *profile.InferenceProfileId)
}
}
return models, nil
}
// SendStream sends the messages to the Bedrock ConverseStream API
func (c *BedrockClient) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
// Ensure channel is closed on all exit paths to prevent goroutine leaks
defer func() {
if r := recover(); r != nil {
err = fmt.Errorf("panic in SendStream: %v", r)
}
close(channel)
}()
messages := c.toMessages(msgs)
var converseInput = bedrockruntime.ConverseStreamInput{
ModelId: aws.String(opts.Model),
Messages: messages,
InferenceConfig: &types.InferenceConfiguration{
Temperature: aws.Float32(float32(opts.Temperature)),
TopP: aws.Float32(float32(opts.TopP))},
}
response, err := c.runtimeClient.ConverseStream(context.Background(), &converseInput)
if err != nil {
return fmt.Errorf("bedrock conversestream failed for model %s: %w", opts.Model, err)
}
for event := range response.GetStream().Events() {
// Possible ConverseStream event types
// https://docs.aws.amazon.com/bedrock/latest/userguide/conversation-inference-call.html#conversation-inference-call-response-converse-stream
switch v := event.(type) {
case *types.ConverseStreamOutputMemberContentBlockDelta:
text, ok := v.Value.Delta.(*types.ContentBlockDeltaMemberText)
if ok {
channel <- text.Value
}
case *types.ConverseStreamOutputMemberMessageStop:
channel <- "\n"
return nil // Let defer handle the close
// Unused Events
case *types.ConverseStreamOutputMemberMessageStart,
*types.ConverseStreamOutputMemberContentBlockStart,
*types.ConverseStreamOutputMemberContentBlockStop,
*types.ConverseStreamOutputMemberMetadata:
default:
return fmt.Errorf("unknown stream event type: %T", v)
}
}
return nil
}
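
A caller sketch for the streaming path (hypothetical wiring; the model ID is just an example). Because SendStream closes the channel on every exit path, ranging over it is safe:

	package main

	import (
		"fmt"
		"log"

		"github.com/danielmiessler/fabric/chat"
		"github.com/danielmiessler/fabric/common"
		"github.com/danielmiessler/fabric/plugins/ai/bedrock"
	)

	func main() {
		client := bedrock.NewClient()
		msgs := []*chat.ChatCompletionMessage{{Role: chat.ChatMessageRoleUser, Content: "Say hello"}}
		opts := &common.ChatOptions{Model: "anthropic.claude-3-sonnet-20240229-v1:0", Temperature: 0.7, TopP: 0.9}
		ch := make(chan string)
		go func() {
			if err := client.SendStream(msgs, opts, ch); err != nil {
				log.Println("stream error:", err)
			}
		}()
		for token := range ch {
			fmt.Print(token) // tokens arrive per ContentBlockDelta event; "\n" on MessageStop
		}
	}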
// Send sends the messages to the Bedrock Converse API
func (c *BedrockClient) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
messages := c.toMessages(msgs)
var converseInput = bedrockruntime.ConverseInput{
ModelId: aws.String(opts.Model),
Messages: messages,
}
response, err := c.runtimeClient.Converse(ctx, &converseInput)
if err != nil {
return "", fmt.Errorf("bedrock converse failed for model %s: %w", opts.Model, err)
}
responseText, ok := response.Output.(*types.ConverseOutputMemberMessage)
if !ok {
return "", fmt.Errorf("unexpected response type: %T", response.Output)
}
if len(responseText.Value.Content) == 0 {
return "", fmt.Errorf("empty response content")
}
responseContentBlock := responseText.Value.Content[0]
text, ok := responseContentBlock.(*types.ContentBlockMemberText)
if !ok {
return "", fmt.Errorf("unexpected content block type: %T", responseContentBlock)
}
return text.Value, nil
}
// NeedsRawMode indicates whether the model requires raw mode processing.
// Bedrock models do not require raw mode.
func (c *BedrockClient) NeedsRawMode(modelName string) bool {
return false
}
// toMessages converts the array of input messages from the ChatCompletionMessageType to the
// Bedrock Converse Message type.
// The system role messages are mapped to the user role as they contain a mix of system messages,
// pattern content and user input.
func (c *BedrockClient) toMessages(inputMessages []*chat.ChatCompletionMessage) (messages []types.Message) {
for _, msg := range inputMessages {
roles := map[string]types.ConversationRole{
chat.ChatMessageRoleUser: types.ConversationRoleUser,
chat.ChatMessageRoleAssistant: types.ConversationRoleAssistant,
chat.ChatMessageRoleSystem: types.ConversationRoleUser,
}
role, ok := roles[msg.Role]
if !ok {
continue
}
message := types.Message{
Role: role,
Content: []types.ContentBlock{&types.ContentBlockMemberText{Value: msg.Content}},
}
messages = append(messages, message)
}
return
}

View File

@@ -4,8 +4,9 @@ import (
"bytes"
"context"
"fmt"
"strings"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
@@ -23,62 +24,86 @@ func (c *Client) ListModels() ([]string, error) {
return []string{"dry-run-model"}, nil
}
func (c *Client) formatMultiContentMessage(msg *chat.ChatCompletionMessage) string {
	var builder strings.Builder
	if len(msg.MultiContent) > 0 {
		builder.WriteString(fmt.Sprintf("%s:\n", msg.Role))
		for _, part := range msg.MultiContent {
			builder.WriteString(fmt.Sprintf(" - Type: %s\n", part.Type))
			if part.Type == chat.ChatMessagePartTypeImageURL {
				builder.WriteString(fmt.Sprintf(" Image URL: %s\n", part.ImageURL.URL))
			} else {
				builder.WriteString(fmt.Sprintf(" Text: %s\n", part.Text))
			}
		}
		builder.WriteString("\n")
	} else {
		builder.WriteString(fmt.Sprintf("%s:\n%s\n\n", msg.Role, msg.Content))
	}
	return builder.String()
}
func (c *Client) formatMessages(msgs []*chat.ChatCompletionMessage) string {
	var builder strings.Builder
	for _, msg := range msgs {
		switch msg.Role {
		case chat.ChatMessageRoleSystem:
			builder.WriteString(fmt.Sprintf("System:\n%s\n\n", msg.Content))
		case chat.ChatMessageRoleAssistant:
			builder.WriteString(c.formatMultiContentMessage(msg))
		case chat.ChatMessageRoleUser:
			builder.WriteString(c.formatMultiContentMessage(msg))
		default:
			builder.WriteString(fmt.Sprintf("%s:\n%s\n\n", msg.Role, msg.Content))
		}
	}
	return builder.String()
}
func (c *Client) formatOptions(opts *common.ChatOptions) string {
	var builder strings.Builder
	builder.WriteString("Options:\n")
	builder.WriteString(fmt.Sprintf("Model: %s\n", opts.Model))
	builder.WriteString(fmt.Sprintf("Temperature: %f\n", opts.Temperature))
	builder.WriteString(fmt.Sprintf("TopP: %f\n", opts.TopP))
	builder.WriteString(fmt.Sprintf("PresencePenalty: %f\n", opts.PresencePenalty))
	builder.WriteString(fmt.Sprintf("FrequencyPenalty: %f\n", opts.FrequencyPenalty))
	if opts.ModelContextLength != 0 {
		builder.WriteString(fmt.Sprintf("ModelContextLength: %d\n", opts.ModelContextLength))
	}
	if opts.Search {
		builder.WriteString("Search: enabled\n")
		if opts.SearchLocation != "" {
			builder.WriteString(fmt.Sprintf("SearchLocation: %s\n", opts.SearchLocation))
		}
	}
	if opts.ImageFile != "" {
		builder.WriteString(fmt.Sprintf("ImageFile: %s\n", opts.ImageFile))
	}
	return builder.String()
}
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) error {
	var builder strings.Builder
	builder.WriteString("Dry run: Would send the following request:\n\n")
	builder.WriteString(c.formatMessages(msgs))
	builder.WriteString(c.formatOptions(opts))
	channel <- builder.String()
	close(channel)
	return nil
}
func (c *Client) Send(_ context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (string, error) {
	fmt.Println("Dry run: Would send the following request:")
	fmt.Print(c.formatMessages(msgs))
	fmt.Print(c.formatOptions(opts))
	return "", nil
}
@@ -90,3 +115,7 @@ func (c *Client) Setup() error {
func (c *Client) SetupFillEnvFileContent(_ *bytes.Buffer) {
// No environment variables needed for dry run
}
func (c *Client) NeedsRawMode(modelName string) bool {
return false
}

View File

@@ -4,8 +4,8 @@ import (
"reflect"
"testing"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/sashabaranov/go-openai"
)
// Test generated using Keploy
@@ -33,7 +33,7 @@ func TestSetup_ReturnsNil(t *testing.T) {
// Test generated using Keploy
func TestSendStream_SendsMessages(t *testing.T) {
client := NewClient()
msgs := []*openai.ChatCompletionMessage{
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "Test message"},
}
opts := &common.ChatOptions{

View File

@@ -5,8 +5,8 @@ import (
"github.com/danielmiessler/fabric/plugins"
"github.com/danielmiessler/fabric/plugins/ai/openai"
goopenai "github.com/sashabaranov/go-openai"
openaiapi "github.com/openai/openai-go"
"github.com/openai/openai-go/option"
)
func NewClient() (ret *Client) {
@@ -32,10 +32,12 @@ type Client struct {
func (oi *Client) configure() (err error) {
oi.apiModels = strings.Split(oi.ApiModels.Value, ",")
config := goopenai.DefaultConfig("")
config.BaseURL = oi.ApiBaseURL.Value
oi.ApiClient = goopenai.NewClientWithConfig(config)
opts := []option.RequestOption{option.WithAPIKey(oi.ApiKey.Value)}
if oi.ApiBaseURL.Value != "" {
opts = append(opts, option.WithBaseURL(oi.ApiBaseURL.Value))
}
client := openaiapi.NewClient(opts...)
oi.ApiClient = &client
return
}
@@ -43,3 +45,7 @@ func (oi *Client) ListModels() (ret []string, err error) {
ret = oi.apiModels
return
}
func (oi *Client) NeedsRawMode(modelName string) bool {
return false
}

View File

@@ -6,8 +6,8 @@ import (
"fmt"
"strings"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/plugins"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/common"
"github.com/google/generative-ai-go/genai"
@@ -60,7 +60,7 @@ func (o *Client) ListModels() (ret []string, err error) {
return
}
func (o *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
systemInstruction, messages := toMessages(msgs)
var client *genai.Client
@@ -91,7 +91,7 @@ func (o *Client) buildModelNameFull(modelName string) string {
return fmt.Sprintf("%v%v", modelsNamePrefix, modelName)
}
func (o *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
ctx := context.Background()
var client *genai.Client
if client, err = genai.NewClient(ctx, option.WithAPIKey(o.ApiKey.Value)); err != nil {
@@ -143,7 +143,11 @@ func (o *Client) extractText(response *genai.GenerateContentResponse) (ret strin
return
}
func toMessages(msgs []*goopenai.ChatCompletionMessage) (systemInstruction *genai.Content, messages []genai.Part) {
func (o *Client) NeedsRawMode(modelName string) bool {
return false
}
func toMessages(msgs []*chat.ChatCompletionMessage) (systemInstruction *genai.Content, messages []genai.Part) {
if len(msgs) >= 2 {
systemInstruction = &genai.Content{
Parts: []genai.Part{

View File

@@ -9,7 +9,7 @@ import (
"io"
"net/http"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
@@ -87,7 +87,7 @@ func (c *Client) ListModels() ([]string, error) {
return models, nil
}
func (c *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
url := fmt.Sprintf("%s/chat/completions", c.ApiUrl.Value)
payload := map[string]interface{}{
@@ -173,7 +173,7 @@ func (c *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common
return
}
func (c *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (content string, err error) {
func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (content string, err error) {
url := fmt.Sprintf("%s/chat/completions", c.ApiUrl.Value)
payload := map[string]interface{}{
@@ -345,3 +345,7 @@ func (c *Client) GetEmbeddings(ctx context.Context, input string, opts *common.C
embeddings = result.Data[0].Embedding
return
}
func (c *Client) NeedsRawMode(modelName string) bool {
return false
}

View File

@@ -5,16 +5,19 @@ import (
"fmt"
"net/http"
"net/url"
"strings"
"time"
"github.com/danielmiessler/fabric/chat"
ollamaapi "github.com/ollama/ollama/api"
"github.com/samber/lo"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
)
const defaultBaseUrl = "http://localhost:11434"
func NewClient() (ret *Client) {
vendorName := "Ollama"
ret = &Client{}
@@ -26,17 +29,36 @@ func NewClient() (ret *Client) {
}
ret.ApiUrl = ret.AddSetupQuestionCustom("API URL", true,
"Enter your Ollama URL (as a reminder, it is usually http://localhost:1234/v1')")
"Enter your Ollama URL (as a reminder, it is usually http://localhost:11434')")
ret.ApiUrl.Value = defaultBaseUrl
ret.ApiKey = ret.PluginBase.AddSetupQuestion("API key", false)
ret.ApiKey.Value = ""
ret.ApiHttpTimeout = ret.AddSetupQuestionCustom("HTTP Timeout", true,
"Specify HTTP timeout duration for Ollama requests (e.g. 30s, 5m, 1h)")
ret.ApiHttpTimeout.Value = "20m"
return
}
type Client struct {
	*plugins.PluginBase
	ApiUrl         *plugins.SetupQuestion
	ApiKey         *plugins.SetupQuestion
	ApiHttpTimeout *plugins.SetupQuestion
	apiUrl *url.URL
	client *ollamaapi.Client
}
type transport_sec struct {
	underlyingTransport http.RoundTripper
	ApiKey              *plugins.SetupQuestion
}
func (t *transport_sec) RoundTrip(req *http.Request) (*http.Response, error) {
	if t.ApiKey.Value != "" {
		req.Header.Add("Authorization", "Bearer "+t.ApiKey.Value)
	}
	return t.underlyingTransport.RoundTrip(req)
}
func (o *Client) configure() (err error) {
@@ -45,7 +67,19 @@ func (o *Client) configure() (err error) {
return
}
o.client = ollamaapi.NewClient(o.apiUrl, &http.Client{Timeout: 1200000 * time.Millisecond})
timeout := 20 * time.Minute // Default timeout
if o.ApiHttpTimeout != nil {
parsed, err := time.ParseDuration(o.ApiHttpTimeout.Value)
if err == nil && o.ApiHttpTimeout.Value != "" {
timeout = parsed
} else if o.ApiHttpTimeout.Value != "" {
fmt.Printf("Invalid HTTP timeout format (%q), using default (20m): %v\n", o.ApiHttpTimeout.Value, err)
}
}
o.client = ollamaapi.NewClient(o.apiUrl, &http.Client{Timeout: timeout, Transport: &transport_sec{underlyingTransport: http.DefaultTransport, ApiKey: o.ApiKey}})
return
}
@@ -63,7 +97,7 @@ func (o *Client) ListModels() (ret []string, err error) {
return
}
func (o *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
func (o *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) (err error) {
req := o.createChatRequest(msgs, opts)
respFunc := func(resp ollamaapi.ChatResponse) (streamErr error) {
@@ -81,7 +115,7 @@ func (o *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common
return
}
func (o *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
bf := false
req := o.createChatRequest(msgs, opts)
@@ -98,8 +132,8 @@ func (o *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessag
return
}
func (o *Client) createChatRequest(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (ret ollamaapi.ChatRequest) {
messages := lo.Map(msgs, func(message *goopenai.ChatCompletionMessage, _ int) (ret ollamaapi.Message) {
func (o *Client) createChatRequest(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret ollamaapi.ChatRequest) {
messages := lo.Map(msgs, func(message *chat.ChatCompletionMessage, _ int) (ret ollamaapi.Message) {
return ollamaapi.Message{Role: message.Role, Content: message.Content}
})
@@ -121,3 +155,16 @@ func (o *Client) createChatRequest(msgs []*goopenai.ChatCompletionMessage, opts
}
return
}
func (o *Client) NeedsRawMode(modelName string) bool {
ollamaPrefixes := []string{
"llama3",
"llama2",
}
for _, prefix := range ollamaPrefixes {
if strings.HasPrefix(modelName, prefix) {
return true
}
}
return false
}

View File

@@ -0,0 +1,118 @@
package openai
// This file contains helper methods for the Chat Completions API.
// These methods are used as fallbacks for OpenAI-compatible providers
// that don't support the newer Responses API (e.g., Groq and Mistral).
import (
"context"
"strings"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
openai "github.com/openai/openai-go"
"github.com/openai/openai-go/shared"
)
// sendChatCompletions sends a request using the Chat Completions API
func (o *Client) sendChatCompletions(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
req := o.buildChatCompletionParams(msgs, opts)
var resp *openai.ChatCompletion
if resp, err = o.ApiClient.Chat.Completions.New(ctx, req); err != nil {
return
}
if len(resp.Choices) > 0 {
ret = resp.Choices[0].Message.Content
}
return
}
// sendStreamChatCompletions sends a streaming request using the Chat Completions API
func (o *Client) sendStreamChatCompletions(
msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string,
) (err error) {
defer close(channel)
req := o.buildChatCompletionParams(msgs, opts)
stream := o.ApiClient.Chat.Completions.NewStreaming(context.Background(), req)
for stream.Next() {
chunk := stream.Current()
if len(chunk.Choices) > 0 && chunk.Choices[0].Delta.Content != "" {
channel <- chunk.Choices[0].Delta.Content
}
}
if stream.Err() == nil {
channel <- "\n"
}
return stream.Err()
}
// buildChatCompletionParams builds parameters for the Chat Completions API
func (o *Client) buildChatCompletionParams(
inputMsgs []*chat.ChatCompletionMessage, opts *common.ChatOptions,
) (ret openai.ChatCompletionNewParams) {
messages := make([]openai.ChatCompletionMessageParamUnion, len(inputMsgs))
for i, msgPtr := range inputMsgs {
msg := *msgPtr
if strings.Contains(opts.Model, "deepseek") && len(inputMsgs) == 1 && msg.Role == chat.ChatMessageRoleSystem {
msg.Role = chat.ChatMessageRoleUser
}
messages[i] = o.convertChatMessage(msg)
}
ret = openai.ChatCompletionNewParams{
Model: shared.ChatModel(opts.Model),
Messages: messages,
}
if !opts.Raw {
ret.Temperature = openai.Float(opts.Temperature)
if opts.TopP != 0 {
ret.TopP = openai.Float(opts.TopP)
}
if opts.MaxTokens != 0 {
ret.MaxTokens = openai.Int(int64(opts.MaxTokens))
}
if opts.PresencePenalty != 0 {
ret.PresencePenalty = openai.Float(opts.PresencePenalty)
}
if opts.FrequencyPenalty != 0 {
ret.FrequencyPenalty = openai.Float(opts.FrequencyPenalty)
}
if opts.Seed != 0 {
ret.Seed = openai.Int(int64(opts.Seed))
}
}
return
}
// convertChatMessage converts fabric chat message to OpenAI chat completion message
func (o *Client) convertChatMessage(msg chat.ChatCompletionMessage) openai.ChatCompletionMessageParamUnion {
result := convertMessageCommon(msg)
switch result.Role {
case chat.ChatMessageRoleSystem:
return openai.SystemMessage(result.Content)
case chat.ChatMessageRoleUser:
// Handle multi-content messages (text + images)
if result.HasMultiContent {
var parts []openai.ChatCompletionContentPartUnionParam
for _, p := range result.MultiContent {
switch p.Type {
case chat.ChatMessagePartTypeText:
parts = append(parts, openai.TextContentPart(p.Text))
case chat.ChatMessagePartTypeImageURL:
parts = append(parts, openai.ImageContentPart(openai.ChatCompletionContentPartImageImageURLParam{URL: p.ImageURL.URL}))
}
}
return openai.UserMessage(parts)
}
return openai.UserMessage(result.Content)
case chat.ChatMessageRoleAssistant:
return openai.AssistantMessage(result.Content)
default:
return openai.UserMessage(result.Content)
}
}

View File

@@ -0,0 +1,21 @@
package openai
import "github.com/danielmiessler/fabric/chat"
// MessageConversionResult holds the common conversion result
type MessageConversionResult struct {
Role string
Content string
MultiContent []chat.ChatMessagePart
HasMultiContent bool
}
// convertMessageCommon extracts common conversion logic
func convertMessageCommon(msg chat.ChatCompletionMessage) MessageConversionResult {
return MessageConversionResult{
Role: msg.Role,
Content: msg.Content,
MultiContent: msg.MultiContent,
HasMultiContent: len(msg.MultiContent) > 0,
}
}

View File

@@ -2,20 +2,23 @@ package openai
import (
"context"
"errors"
"fmt"
"io"
"log/slog"
"github.com/danielmiessler/fabric/plugins"
"slices"
"strings"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/samber/lo"
"github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/plugins"
openai "github.com/openai/openai-go"
"github.com/openai/openai-go/option"
"github.com/openai/openai-go/packages/pagination"
"github.com/openai/openai-go/responses"
"github.com/openai/openai-go/shared"
"github.com/openai/openai-go/shared/constant"
)
func NewClient() (ret *Client) {
return NewClientCompatible("OpenAI", "https://api.openai.com/v1", nil)
return NewClientCompatibleWithResponses("OpenAI", "https://api.openai.com/v1", true, nil)
}
func NewClientCompatible(vendorName string, defaultBaseUrl string, configureCustom func() error) (ret *Client) {
@@ -28,6 +31,17 @@ func NewClientCompatible(vendorName string, defaultBaseUrl string, configureCust
return
}
func NewClientCompatibleWithResponses(vendorName string, defaultBaseUrl string, implementsResponses bool, configureCustom func() error) (ret *Client) {
ret = NewClientCompatibleNoSetupQuestions(vendorName, configureCustom)
ret.ApiKey = ret.AddSetupQuestion("API Key", true)
ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", false)
ret.ApiBaseURL.Value = defaultBaseUrl
ret.ImplementsResponses = implementsResponses
return
}
func NewClientCompatibleNoSetupQuestions(vendorName string, configureCustom func() error) (ret *Client) {
ret = &Client{}
@@ -46,117 +60,252 @@ func NewClientCompatibleNoSetupQuestions(vendorName string, configureCustom func
type Client struct {
*plugins.PluginBase
ApiKey *plugins.SetupQuestion
ApiBaseURL *plugins.SetupQuestion
ApiClient *openai.Client
ApiKey *plugins.SetupQuestion
ApiBaseURL *plugins.SetupQuestion
ApiClient *openai.Client
ImplementsResponses bool // Whether this provider supports the Responses API
}
func (o *Client) configure() (ret error) {
config := openai.DefaultConfig(o.ApiKey.Value)
opts := []option.RequestOption{option.WithAPIKey(o.ApiKey.Value)}
if o.ApiBaseURL.Value != "" {
config.BaseURL = o.ApiBaseURL.Value
opts = append(opts, option.WithBaseURL(o.ApiBaseURL.Value))
}
o.ApiClient = openai.NewClientWithConfig(config)
client := openai.NewClient(opts...)
o.ApiClient = &client
return
}
func (o *Client) ListModels() (ret []string, err error) {
var models openai.ModelsList
if models, err = o.ApiClient.ListModels(context.Background()); err != nil {
var page *pagination.Page[openai.Model]
if page, err = o.ApiClient.Models.List(context.Background()); err != nil {
return
}
model := models.Models
for _, mod := range model {
for _, mod := range page.Data {
ret = append(ret, mod.ID)
}
return
}
func (o *Client) SendStream(
	msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string,
) (err error) {
	// Use Responses API for OpenAI, Chat Completions API for other providers
	if o.supportsResponsesAPI() {
		return o.sendStreamResponses(msgs, opts, channel)
	}
	return o.sendStreamChatCompletions(msgs, opts, channel)
}
func (o *Client) sendStreamResponses(
	msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string,
) (err error) {
	defer close(channel)
	req := o.buildResponseParams(msgs, opts)
	stream := o.ApiClient.Responses.NewStreaming(context.Background(), req)
	for stream.Next() {
		event := stream.Current()
		switch event.Type {
		case string(constant.ResponseOutputTextDelta("").Default()):
			channel <- event.AsResponseOutputTextDelta().Delta
		case string(constant.ResponseOutputTextDone("").Default()):
			channel <- event.AsResponseOutputTextDone().Text
		}
	}
	if stream.Err() == nil {
		channel <- "\n"
	}
	return stream.Err()
}
func (o *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
	// Use Responses API for OpenAI, Chat Completions API for other providers
	if o.supportsResponsesAPI() {
		return o.sendResponses(ctx, msgs, opts)
	}
	return o.sendChatCompletions(ctx, msgs, opts)
}
func (o *Client) sendResponses(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (ret string, err error) {
	// Validate model supports image generation if image file is specified
	if opts.ImageFile != "" && !supportsImageGeneration(opts.Model) {
		return "", fmt.Errorf("model '%s' does not support image generation. Supported models: %s", opts.Model, strings.Join(ImageGenerationSupportedModels, ", "))
	}
	req := o.buildResponseParams(msgs, opts)
	var resp *responses.Response
	if resp, err = o.ApiClient.Responses.New(ctx, req); err != nil {
		return
	}
	// Extract and save images if requested
	if err = o.extractAndSaveImages(resp, opts); err != nil {
		return
	}
	ret = o.extractText(resp)
	return
}
// supportsResponsesAPI determines if the provider supports the new Responses API
func (o *Client) supportsResponsesAPI() bool {
	return o.ImplementsResponses
}
func (o *Client) NeedsRawMode(modelName string) bool {
	openaiModelsPrefixes := []string{
		"o1",
		"o3",
		"o4",
	}
	openAIModelsNeedingRaw := []string{
		"gpt-4o-mini-search-preview",
		"gpt-4o-mini-search-preview-2025-03-11",
		"gpt-4o-search-preview",
		"gpt-4o-search-preview-2025-03-11",
	}
	for _, prefix := range openaiModelsPrefixes {
		if strings.HasPrefix(modelName, prefix) {
			return true
		}
	}
	return slices.Contains(openAIModelsNeedingRaw, modelName)
}
func (o *Client) buildResponseParams(
inputMsgs []*chat.ChatCompletionMessage, opts *common.ChatOptions,
) (ret responses.ResponseNewParams) {
items := make([]responses.ResponseInputItemUnionParam, len(inputMsgs))
for i, msgPtr := range inputMsgs {
msg := *msgPtr
if strings.Contains(opts.Model, "deepseek") && len(inputMsgs) == 1 && msg.Role == chat.ChatMessageRoleSystem {
msg.Role = chat.ChatMessageRoleUser
}
items[i] = convertMessage(msg)
}
ret = responses.ResponseNewParams{
Model: shared.ResponsesModel(opts.Model),
Input: responses.ResponseNewParamsInputUnion{
OfInputItemList: items,
},
}
// Add tools if enabled
var tools []responses.ToolUnionParam
// Add web search tool if enabled
if opts.Search {
webSearchTool := responses.ToolParamOfWebSearchPreview("web_search_preview")
// Add user location if provided
if opts.SearchLocation != "" {
webSearchTool.OfWebSearchPreview.UserLocation = responses.WebSearchToolUserLocationParam{
Type: "approximate",
Timezone: openai.String(opts.SearchLocation),
}
}
tools = append(tools, webSearchTool)
}
// Add image generation tool if needed
tools = o.addImageGenerationTool(opts, tools)
if len(tools) > 0 {
ret.Tools = tools
}
if !opts.Raw {
ret.Temperature = openai.Float(opts.Temperature)
if opts.TopP != 0 {
ret.TopP = openai.Float(opts.TopP)
}
if opts.MaxTokens != 0 {
ret.MaxOutputTokens = openai.Int(int64(opts.MaxTokens))
}
// Add parameters not officially supported by Responses API as extra fields
extraFields := make(map[string]any)
if opts.PresencePenalty != 0 {
extraFields["presence_penalty"] = opts.PresencePenalty
}
if opts.FrequencyPenalty != 0 {
extraFields["frequency_penalty"] = opts.FrequencyPenalty
}
if opts.Seed != 0 {
extraFields["seed"] = opts.Seed
}
if len(extraFields) > 0 {
ret.SetExtraFields(extraFields)
}
}
return
}
func convertMessage(msg chat.ChatCompletionMessage) responses.ResponseInputItemUnionParam {
result := convertMessageCommon(msg)
role := responses.EasyInputMessageRole(result.Role)
if result.HasMultiContent {
var parts []responses.ResponseInputContentUnionParam
for _, p := range result.MultiContent {
switch p.Type {
case chat.ChatMessagePartTypeText:
parts = append(parts, responses.ResponseInputContentParamOfInputText(p.Text))
case chat.ChatMessagePartTypeImageURL:
part := responses.ResponseInputContentParamOfInputImage(responses.ResponseInputImageDetailAuto)
if part.OfInputImage != nil {
part.OfInputImage.ImageURL = openai.String(p.ImageURL.URL)
}
parts = append(parts, part)
}
}
contentList := responses.ResponseInputMessageContentListParam(parts)
return responses.ResponseInputItemParamOfMessage(contentList, role)
}
return responses.ResponseInputItemParamOfMessage(result.Content, role)
}
func (o *Client) extractText(resp *responses.Response) (ret string) {
var textParts []string
var citations []string
citationMap := make(map[string]bool) // To avoid duplicate citations
for _, item := range resp.Output {
if item.Type == "message" {
for _, c := range item.Content {
if c.Type == "output_text" {
outputText := c.AsOutputText()
textParts = append(textParts, outputText.Text)
// Extract citations from annotations
for _, annotation := range outputText.Annotations {
if annotation.Type == "url_citation" {
urlCitation := annotation.AsURLCitation()
citationKey := urlCitation.URL + "|" + urlCitation.Title
if !citationMap[citationKey] {
citationMap[citationKey] = true
citationText := fmt.Sprintf("- [%s](%s)", urlCitation.Title, urlCitation.URL)
citations = append(citations, citationText)
}
}
}
}
}
break
}
}
ret = strings.Join(textParts, "")
// Append citations if any were found
if len(citations) > 0 {
ret += "\n\n## Sources\n\n" + strings.Join(citations, "\n")
}
return
}

View File

@@ -0,0 +1,146 @@
package openai
// This file contains helper methods for image generation and processing
// using OpenAI's Responses API and Image API.
import (
"encoding/base64"
"fmt"
"os"
"path/filepath"
"strings"
"github.com/danielmiessler/fabric/common"
"github.com/openai/openai-go/packages/param"
"github.com/openai/openai-go/responses"
)
// ImageGenerationResponseType is the type used for image generation calls in responses
const ImageGenerationResponseType = "image_generation_call"
const ImageGenerationToolType = "image_generation"
// ImageGenerationSupportedModels lists all models that support image generation
var ImageGenerationSupportedModels = []string{
"gpt-4o",
"gpt-4o-mini",
"gpt-4.1",
"gpt-4.1-mini",
"gpt-4.1-nano",
"o3",
}
// supportsImageGeneration checks if the given model supports the image_generation tool
func supportsImageGeneration(model string) bool {
for _, supportedModel := range ImageGenerationSupportedModels {
if model == supportedModel {
return true
}
}
return false
}
// getOutputFormatFromExtension determines the API output format based on file extension
func getOutputFormatFromExtension(imagePath string) string {
if imagePath == "" {
return "png" // Default format
}
ext := strings.ToLower(filepath.Ext(imagePath))
switch ext {
case ".png":
return "png"
case ".webp":
return "webp"
case ".jpg":
return "jpeg"
case ".jpeg":
return "jpeg"
default:
return "png" // Default fallback
}
}
// addImageGenerationTool adds the image generation tool to the request if needed
func (o *Client) addImageGenerationTool(opts *common.ChatOptions, tools []responses.ToolUnionParam) []responses.ToolUnionParam {
// Check if the request seems to be asking for image generation
if o.shouldUseImageGeneration(opts) {
outputFormat := getOutputFormatFromExtension(opts.ImageFile)
// Build the image generation tool with user parameters
imageGenTool := responses.ToolUnionParam{
OfImageGeneration: &responses.ToolImageGenerationParam{
Type: ImageGenerationToolType,
Model: "gpt-image-1",
OutputFormat: outputFormat,
},
}
// Set quality if specified by user (otherwise let OpenAI use default)
if opts.ImageQuality != "" {
imageGenTool.OfImageGeneration.Quality = opts.ImageQuality
}
// Set size if specified by user (otherwise let OpenAI use default)
if opts.ImageSize != "" {
imageGenTool.OfImageGeneration.Size = opts.ImageSize
}
// Set background if specified by user (otherwise let OpenAI use default)
if opts.ImageBackground != "" {
imageGenTool.OfImageGeneration.Background = opts.ImageBackground
}
// Set compression if specified by user (only for jpeg/webp)
if opts.ImageCompression != 0 {
imageGenTool.OfImageGeneration.OutputCompression = param.NewOpt(int64(opts.ImageCompression))
}
tools = append(tools, imageGenTool)
}
return tools
}
// shouldUseImageGeneration determines if image generation should be enabled
// This is a heuristic based on the presence of the --image-file flag
func (o *Client) shouldUseImageGeneration(opts *common.ChatOptions) bool {
return opts.ImageFile != ""
}
// extractAndSaveImages extracts generated images from the response and saves them
func (o *Client) extractAndSaveImages(resp *responses.Response, opts *common.ChatOptions) error {
if opts.ImageFile == "" {
return nil // No image file specified, skip saving
}
// Extract image data from response
for _, item := range resp.Output {
if item.Type == ImageGenerationResponseType {
imageCall := item.AsImageGenerationCall()
if imageCall.Status == "completed" && imageCall.Result != "" {
// Decode base64 image data
imageData, err := base64.StdEncoding.DecodeString(imageCall.Result)
if err != nil {
return fmt.Errorf("failed to decode image data: %w", err)
}
// Ensure directory exists
dir := filepath.Dir(opts.ImageFile)
if dir != "." {
if err := os.MkdirAll(dir, 0755); err != nil {
return fmt.Errorf("failed to create directory %s: %w", dir, err)
}
}
// Save image to file
if err := os.WriteFile(opts.ImageFile, imageData, 0644); err != nil {
return fmt.Errorf("failed to save image to %s: %w", opts.ImageFile, err)
}
fmt.Printf("Image saved to: %s\n", opts.ImageFile)
return nil
}
}
}
return nil
}
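
Taken together, these helpers form the whole image path: validate the model, attach the image_generation tool to the request, then decode and persist the result. A hedged sketch of how a caller might wire them up; generateImage and its client and resp parameters are placeholders, not code from this file:

// Sketch only: assumes a configured *Client and a *responses.Response
// obtained from a prior Responses API call.
func generateImage(client *Client, resp *responses.Response) error {
	opts := &common.ChatOptions{
		Model:     "gpt-4o",
		ImageFile: "out.webp", // extension selects the "webp" output format
	}
	if opts.ImageFile != "" && !supportsImageGeneration(opts.Model) {
		return fmt.Errorf("model %q cannot generate images", opts.Model)
	}
	tools := client.addImageGenerationTool(opts, nil) // adds one image_generation tool
	_ = tools                                         // would be attached to the request params
	return client.extractAndSaveImages(resp, opts)    // decode base64 result, write out.webp
}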

View File

@@ -0,0 +1,444 @@
package openai
import (
"fmt"
"strings"
"testing"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/openai/openai-go/responses"
"github.com/stretchr/testify/assert"
)
func TestShouldUseImageGeneration(t *testing.T) {
client := NewClient()
// Test with image file specified
opts := &common.ChatOptions{
ImageFile: "output.png",
}
assert.True(t, client.shouldUseImageGeneration(opts), "Should use image generation when image file is specified")
// Test without image file
opts = &common.ChatOptions{
ImageFile: "",
}
assert.False(t, client.shouldUseImageGeneration(opts), "Should not use image generation when no image file is specified")
}
func TestAddImageGenerationTool(t *testing.T) {
client := NewClient()
// Test with image generation enabled
opts := &common.ChatOptions{
ImageFile: "output.png",
}
tools := []responses.ToolUnionParam{}
result := client.addImageGenerationTool(opts, tools)
assert.Len(t, result, 1, "Should add one image generation tool")
assert.NotNil(t, result[0].OfImageGeneration, "Should have image generation tool")
assert.Equal(t, "image_generation", string(result[0].OfImageGeneration.Type))
assert.Equal(t, "gpt-image-1", result[0].OfImageGeneration.Model)
assert.Equal(t, "png", result[0].OfImageGeneration.OutputFormat)
// Test without image generation
opts = &common.ChatOptions{
ImageFile: "",
}
tools = []responses.ToolUnionParam{}
result = client.addImageGenerationTool(opts, tools)
assert.Len(t, result, 0, "Should not add image generation tool when not needed")
}
func TestBuildResponseParams_WithImageGeneration(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "gpt-image-1",
ImageFile: "output.png",
}
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "Generate an image of a cat"},
}
params := client.buildResponseParams(msgs, opts)
assert.NotNil(t, params.Tools, "Expected tools when image generation is enabled")
// Should have image generation tool
hasImageTool := false
for _, tool := range params.Tools {
if tool.OfImageGeneration != nil {
hasImageTool = true
assert.Equal(t, "image_generation", string(tool.OfImageGeneration.Type))
assert.Equal(t, "gpt-image-1", tool.OfImageGeneration.Model)
break
}
}
assert.True(t, hasImageTool, "Should have image generation tool")
}
func TestBuildResponseParams_WithBothSearchAndImage(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "gpt-image-1",
Search: true,
SearchLocation: "America/Los_Angeles",
ImageFile: "output.png",
}
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "Search for cat images and generate one"},
}
params := client.buildResponseParams(msgs, opts)
assert.NotNil(t, params.Tools, "Expected tools when both search and image generation are enabled")
assert.Len(t, params.Tools, 2, "Should have both search and image generation tools")
hasSearchTool := false
hasImageTool := false
for _, tool := range params.Tools {
if tool.OfWebSearchPreview != nil {
hasSearchTool = true
}
if tool.OfImageGeneration != nil {
hasImageTool = true
}
}
assert.True(t, hasSearchTool, "Should have web search tool")
assert.True(t, hasImageTool, "Should have image generation tool")
}
func TestGetOutputFormatFromExtension(t *testing.T) {
tests := []struct {
name string
imagePath string
expectedFormat string
}{
{
name: "PNG extension",
imagePath: "/tmp/output.png",
expectedFormat: "png",
},
{
name: "WEBP extension",
imagePath: "/tmp/output.webp",
expectedFormat: "webp",
},
{
name: "JPG extension",
imagePath: "/tmp/output.jpg",
expectedFormat: "jpeg",
},
{
name: "JPEG extension",
imagePath: "/tmp/output.jpeg",
expectedFormat: "jpeg",
},
{
name: "Uppercase PNG extension",
imagePath: "/tmp/output.PNG",
expectedFormat: "png",
},
{
name: "Mixed case JPEG extension",
imagePath: "/tmp/output.JpEg",
expectedFormat: "jpeg",
},
{
name: "Empty path",
imagePath: "",
expectedFormat: "png",
},
{
name: "No extension",
imagePath: "/tmp/output",
expectedFormat: "png",
},
{
name: "Unsupported extension",
imagePath: "/tmp/output.gif",
expectedFormat: "png",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := getOutputFormatFromExtension(tt.imagePath)
assert.Equal(t, tt.expectedFormat, result)
})
}
}
func TestAddImageGenerationToolWithDynamicFormat(t *testing.T) {
client := NewClient()
tests := []struct {
name string
imageFile string
expectedFormat string
}{
{
name: "PNG file",
imageFile: "/tmp/output.png",
expectedFormat: "png",
},
{
name: "WEBP file",
imageFile: "/tmp/output.webp",
expectedFormat: "webp",
},
{
name: "JPG file",
imageFile: "/tmp/output.jpg",
expectedFormat: "jpeg",
},
{
name: "JPEG file",
imageFile: "/tmp/output.jpeg",
expectedFormat: "jpeg",
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
opts := &common.ChatOptions{
ImageFile: tt.imageFile,
}
tools := client.addImageGenerationTool(opts, []responses.ToolUnionParam{})
assert.Len(t, tools, 1, "Should have one tool")
assert.NotNil(t, tools[0].OfImageGeneration, "Should be image generation tool")
assert.Equal(t, tt.expectedFormat, tools[0].OfImageGeneration.OutputFormat, "Output format should match file extension")
})
}
}
func TestSupportsImageGeneration(t *testing.T) {
tests := []struct {
name string
model string
expected bool
}{
{
name: "gpt-4o supports image generation",
model: "gpt-4o",
expected: true,
},
{
name: "gpt-4o-mini supports image generation",
model: "gpt-4o-mini",
expected: true,
},
{
name: "gpt-4.1 supports image generation",
model: "gpt-4.1",
expected: true,
},
{
name: "gpt-4.1-mini supports image generation",
model: "gpt-4.1-mini",
expected: true,
},
{
name: "gpt-4.1-nano supports image generation",
model: "gpt-4.1-nano",
expected: true,
},
{
name: "o3 supports image generation",
model: "o3",
expected: true,
},
{
name: "o1 does not support image generation",
model: "o1",
expected: false,
},
{
name: "o1-mini does not support image generation",
model: "o1-mini",
expected: false,
},
{
name: "o3-mini does not support image generation",
model: "o3-mini",
expected: false,
},
{
name: "gpt-4 does not support image generation",
model: "gpt-4",
expected: false,
},
{
name: "gpt-3.5-turbo does not support image generation",
model: "gpt-3.5-turbo",
expected: false,
},
{
name: "empty model does not support image generation",
model: "",
expected: false,
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
result := supportsImageGeneration(tt.model)
assert.Equal(t, tt.expected, result)
})
}
}
func TestModelValidationLogic(t *testing.T) {
t.Run("Unsupported model with image file should return validation error", func(t *testing.T) {
opts := &common.ChatOptions{
Model: "o1-mini",
ImageFile: "/tmp/output.png",
}
// Test the validation logic directly
if opts.ImageFile != "" && !supportsImageGeneration(opts.Model) {
err := fmt.Errorf("model '%s' does not support image generation. Supported models: %s", opts.Model, strings.Join(ImageGenerationSupportedModels, ", "))
assert.Contains(t, err.Error(), "does not support image generation")
assert.Contains(t, err.Error(), "o1-mini")
assert.Contains(t, err.Error(), "Supported models:")
} else {
t.Error("Expected validation to trigger")
}
})
t.Run("Supported model with image file should not trigger validation", func(t *testing.T) {
opts := &common.ChatOptions{
Model: "gpt-4o",
ImageFile: "/tmp/output.png",
}
// Test the validation logic directly
shouldFail := opts.ImageFile != "" && !supportsImageGeneration(opts.Model)
assert.False(t, shouldFail, "Validation should not trigger for supported model")
})
t.Run("Unsupported model without image file should not trigger validation", func(t *testing.T) {
opts := &common.ChatOptions{
Model: "o1-mini",
ImageFile: "", // No image file
}
// Test the validation logic directly
shouldFail := opts.ImageFile != "" && !supportsImageGeneration(opts.Model)
assert.False(t, shouldFail, "Validation should not trigger when no image file is specified")
})
}
func TestAddImageGenerationToolWithUserParameters(t *testing.T) {
client := NewClient()
tests := []struct {
name string
opts *common.ChatOptions
expected map[string]interface{}
}{
{
name: "All parameters specified",
opts: &common.ChatOptions{
ImageFile: "/tmp/test.png",
ImageSize: "1536x1024",
ImageQuality: "high",
ImageBackground: "transparent",
ImageCompression: 0, // Not applicable for PNG
},
expected: map[string]interface{}{
"size": "1536x1024",
"quality": "high",
"background": "transparent",
"output_format": "png",
},
},
{
name: "JPEG with compression",
opts: &common.ChatOptions{
ImageFile: "/tmp/test.jpg",
ImageSize: "1024x1024",
ImageQuality: "medium",
ImageBackground: "opaque",
ImageCompression: 75,
},
expected: map[string]interface{}{
"size": "1024x1024",
"quality": "medium",
"background": "opaque",
"output_format": "jpeg",
"output_compression": int64(75),
},
},
{
name: "Only some parameters specified",
opts: &common.ChatOptions{
ImageFile: "/tmp/test.webp",
ImageQuality: "low",
},
expected: map[string]interface{}{
"quality": "low",
"output_format": "webp",
},
},
{
name: "No parameters specified (defaults)",
opts: &common.ChatOptions{
ImageFile: "/tmp/test.png",
},
expected: map[string]interface{}{
"output_format": "png",
},
},
}
for _, tt := range tests {
t.Run(tt.name, func(t *testing.T) {
tools := client.addImageGenerationTool(tt.opts, []responses.ToolUnionParam{})
assert.Len(t, tools, 1)
assert.NotNil(t, tools[0].OfImageGeneration)
tool := tools[0].OfImageGeneration
// Check required fields
assert.Equal(t, "gpt-image-1", tool.Model)
assert.Equal(t, tt.expected["output_format"], tool.OutputFormat)
// Check optional fields
if expectedSize, ok := tt.expected["size"]; ok {
assert.Equal(t, expectedSize, tool.Size)
} else {
assert.Empty(t, tool.Size, "Size should not be set when not specified")
}
if expectedQuality, ok := tt.expected["quality"]; ok {
assert.Equal(t, expectedQuality, tool.Quality)
} else {
assert.Empty(t, tool.Quality, "Quality should not be set when not specified")
}
if expectedBackground, ok := tt.expected["background"]; ok {
assert.Equal(t, expectedBackground, tool.Background)
} else {
assert.Empty(t, tool.Background, "Background should not be set when not specified")
}
if expectedCompression, ok := tt.expected["output_compression"]; ok {
assert.Equal(t, expectedCompression, tool.OutputCompression.Value)
} else {
assert.Equal(t, int64(0), tool.OutputCompression.Value, "Compression should not be set when not specified")
}
})
}
}

View File

@@ -1,102 +1,177 @@
package openai
import (
"strings"
"testing"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
"github.com/sashabaranov/go-openai"
goopenai "github.com/sashabaranov/go-openai"
openai "github.com/openai/openai-go"
"github.com/openai/openai-go/responses"
"github.com/openai/openai-go/shared"
"github.com/stretchr/testify/assert"
)
func TestBuildChatCompletionRequestPinSeed(t *testing.T) {
func TestBuildResponseRequestWithMaxTokens(t *testing.T) {
var msgs []*goopenai.ChatCompletionMessage
var msgs []*chat.ChatCompletionMessage
for i := 0; i < 2; i++ {
msgs = append(msgs, &goopenai.ChatCompletionMessage{
msgs = append(msgs, &chat.ChatCompletionMessage{
Role: "User",
Content: "My msg",
})
}
opts := &common.ChatOptions{
Temperature: 0.8,
TopP: 0.9,
PresencePenalty: 0.1,
FrequencyPenalty: 0.2,
Raw: false,
Seed: 1,
}
var expectedMessages []openai.ChatCompletionMessage
for i := 0; i < 2; i++ {
expectedMessages = append(expectedMessages,
openai.ChatCompletionMessage{
Role: msgs[i].Role,
Content: msgs[i].Content,
},
)
}
var expectedRequest = goopenai.ChatCompletionRequest{
Model: opts.Model,
Temperature: float32(opts.Temperature),
TopP: float32(opts.TopP),
PresencePenalty: float32(opts.PresencePenalty),
FrequencyPenalty: float32(opts.FrequencyPenalty),
Messages: expectedMessages,
Seed: &opts.Seed,
Temperature: 0.8,
TopP: 0.9,
Raw: false,
MaxTokens: 50,
}
var client = NewClient()
request := client.buildChatCompletionRequest(msgs, opts)
assert.Equal(t, expectedRequest, request)
request := client.buildResponseParams(msgs, opts)
assert.Equal(t, shared.ResponsesModel(opts.Model), request.Model)
assert.Equal(t, openai.Float(opts.Temperature), request.Temperature)
assert.Equal(t, openai.Float(opts.TopP), request.TopP)
assert.Equal(t, openai.Int(int64(opts.MaxTokens)), request.MaxOutputTokens)
}
func TestBuildChatCompletionRequestNilSeed(t *testing.T) {
func TestBuildResponseRequestNoMaxTokens(t *testing.T) {
var msgs []*goopenai.ChatCompletionMessage
var msgs []*chat.ChatCompletionMessage
for i := 0; i < 2; i++ {
msgs = append(msgs, &goopenai.ChatCompletionMessage{
msgs = append(msgs, &chat.ChatCompletionMessage{
Role: "User",
Content: "My msg",
})
}
opts := &common.ChatOptions{
Temperature: 0.8,
TopP: 0.9,
PresencePenalty: 0.1,
FrequencyPenalty: 0.2,
Raw: false,
Seed: 0,
}
var expectedMessages []openai.ChatCompletionMessage
for i := 0; i < 2; i++ {
expectedMessages = append(expectedMessages,
openai.ChatCompletionMessage{
Role: msgs[i].Role,
Content: msgs[i].Content,
},
)
}
var expectedRequest = goopenai.ChatCompletionRequest{
Model: opts.Model,
Temperature: float32(opts.Temperature),
TopP: float32(opts.TopP),
PresencePenalty: float32(opts.PresencePenalty),
FrequencyPenalty: float32(opts.FrequencyPenalty),
Messages: expectedMessages,
Seed: nil,
Temperature: 0.8,
TopP: 0.9,
Raw: false,
}
var client = NewClient()
request := client.buildChatCompletionRequest(msgs, opts)
assert.Equal(t, expectedRequest, request)
request := client.buildResponseParams(msgs, opts)
assert.Equal(t, shared.ResponsesModel(opts.Model), request.Model)
assert.Equal(t, openai.Float(opts.Temperature), request.Temperature)
assert.Equal(t, openai.Float(opts.TopP), request.TopP)
assert.False(t, request.MaxOutputTokens.Valid())
}
func TestBuildResponseParams_WithoutSearch(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "gpt-4o",
Temperature: 0.7,
Search: false,
}
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "Hello"},
}
params := client.buildResponseParams(msgs, opts)
assert.Nil(t, params.Tools, "Expected no tools when search is disabled")
assert.Equal(t, shared.ResponsesModel(opts.Model), params.Model)
assert.Equal(t, openai.Float(opts.Temperature), params.Temperature)
}
func TestBuildResponseParams_WithSearch(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "gpt-4o",
Temperature: 0.7,
Search: true,
}
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "What's the weather today?"},
}
params := client.buildResponseParams(msgs, opts)
assert.NotNil(t, params.Tools, "Expected tools when search is enabled")
assert.Len(t, params.Tools, 1, "Expected exactly one tool")
tool := params.Tools[0]
assert.NotNil(t, tool.OfWebSearchPreview, "Expected web search tool")
assert.Equal(t, responses.WebSearchToolType("web_search_preview"), tool.OfWebSearchPreview.Type)
}
func TestBuildResponseParams_WithSearchAndLocation(t *testing.T) {
client := NewClient()
opts := &common.ChatOptions{
Model: "gpt-4o",
Temperature: 0.7,
Search: true,
SearchLocation: "America/Los_Angeles",
}
msgs := []*chat.ChatCompletionMessage{
{Role: "user", Content: "What's the weather in San Francisco?"},
}
params := client.buildResponseParams(msgs, opts)
assert.NotNil(t, params.Tools, "Expected tools when search is enabled")
tool := params.Tools[0]
assert.NotNil(t, tool.OfWebSearchPreview, "Expected web search tool")
userLocation := tool.OfWebSearchPreview.UserLocation
assert.Equal(t, "approximate", string(userLocation.Type))
assert.True(t, userLocation.Timezone.Valid(), "Expected timezone to be set")
assert.Equal(t, opts.SearchLocation, userLocation.Timezone.Value)
}
func TestCitationFormatting(t *testing.T) {
// Test the citation formatting logic by simulating the citation extraction
var textParts []string
var citations []string
citationMap := make(map[string]bool)
// Simulate text content
textParts = append(textParts, "Based on recent research, artificial intelligence is advancing rapidly.")
// Simulate citations (as they would be extracted from OpenAI response)
mockCitations := []struct {
URL string
Title string
}{
{"https://example.com/ai-research", "AI Research Advances 2025"},
{"https://another-source.com/tech-news", "Technology News Today"},
{"https://example.com/ai-research", "AI Research Advances 2025"}, // Duplicate to test deduplication
}
for _, citation := range mockCitations {
citationKey := citation.URL + "|" + citation.Title
if !citationMap[citationKey] {
citationMap[citationKey] = true
citationText := "- [" + citation.Title + "](" + citation.URL + ")"
citations = append(citations, citationText)
}
}
result := strings.Join(textParts, "")
if len(citations) > 0 {
result += "\n\n## Sources\n\n" + strings.Join(citations, "\n")
}
// Verify the result contains the expected text
expectedText := "Based on recent research, artificial intelligence is advancing rapidly."
assert.Contains(t, result, expectedText, "Expected result to contain original text")
// Verify citations are included
assert.Contains(t, result, "## Sources", "Expected result to contain Sources section")
assert.Contains(t, result, "[AI Research Advances 2025](https://example.com/ai-research)", "Expected result to contain first citation")
assert.Contains(t, result, "[Technology News Today](https://another-source.com/tech-news)", "Expected result to contain second citation")
// Verify deduplication - should only have 2 unique citations, not 3
citationCount := strings.Count(result, "- [")
assert.Equal(t, 2, citationCount, "Expected 2 unique citations")
}

View File

@@ -1,13 +1,17 @@
package openai_compatible
import (
"os"
"strings"
"github.com/danielmiessler/fabric/plugins/ai/openai"
)
// ProviderConfig defines the configuration for an OpenAI-compatible API provider
type ProviderConfig struct {
Name string
BaseURL string
Name string
BaseURL string
ImplementsResponses bool // Whether the provider supports OpenAI's new Responses API
}
// Client is the common structure for all OpenAI-compatible providers
@@ -18,49 +22,98 @@ type Client struct {
// NewClient creates a new OpenAI-compatible client for the specified provider
func NewClient(providerConfig ProviderConfig) *Client {
client := &Client{}
client.Client = openai.NewClientCompatible(providerConfig.Name, providerConfig.BaseURL, nil)
client.Client = openai.NewClientCompatibleWithResponses(
providerConfig.Name,
providerConfig.BaseURL,
providerConfig.ImplementsResponses,
nil,
)
return client
}
// ProviderMap is a map of provider name to ProviderConfig for O(1) lookup
var ProviderMap = map[string]ProviderConfig{
"Mistral": {
Name: "Mistral",
BaseURL: "https://api.mistral.ai/v1",
},
"LiteLLM": {
Name: "LiteLLM",
BaseURL: "http://localhost:4000",
},
"Groq": {
Name: "Groq",
BaseURL: "https://api.groq.com/openai/v1",
},
"GrokAI": {
Name: "GrokAI",
BaseURL: "https://api.x.ai/v1",
},
"DeepSeek": {
Name: "DeepSeek",
BaseURL: "https://api.deepseek.com",
"AIML": {
Name: "AIML",
BaseURL: "https://api.aimlapi.com/v1",
ImplementsResponses: false,
},
"Cerebras": {
Name: "Cerebras",
BaseURL: "https://api.cerebras.ai/v1",
Name: "Cerebras",
BaseURL: "https://api.cerebras.ai/v1",
ImplementsResponses: false,
},
"DeepSeek": {
Name: "DeepSeek",
BaseURL: "https://api.deepseek.com",
ImplementsResponses: false,
},
"GrokAI": {
Name: "GrokAI",
BaseURL: "https://api.x.ai/v1",
ImplementsResponses: false,
},
"Groq": {
Name: "Groq",
BaseURL: "https://api.groq.com/openai/v1",
ImplementsResponses: false,
},
"Langdock": {
Name: "Langdock",
BaseURL: "https://api.langdock.com/openai/{{REGION=us}}/v1",
ImplementsResponses: false,
},
"LiteLLM": {
Name: "LiteLLM",
BaseURL: "http://localhost:4000",
ImplementsResponses: false,
},
"Mistral": {
Name: "Mistral",
BaseURL: "https://api.mistral.ai/v1",
ImplementsResponses: false,
},
"OpenRouter": {
Name: "OpenRouter",
BaseURL: "https://openrouter.ai/api/v1",
Name: "OpenRouter",
BaseURL: "https://openrouter.ai/api/v1",
ImplementsResponses: false,
},
"SiliconCloud": {
Name: "SiliconCloud",
BaseURL: "https://api.siliconflow.cn/v1",
Name: "SiliconCloud",
BaseURL: "https://api.siliconflow.cn/v1",
ImplementsResponses: false,
},
}
// GetProviderByName returns the provider configuration for a given name with O(1) lookup
func GetProviderByName(name string) (ProviderConfig, bool) {
provider, found := ProviderMap[name]
if strings.Contains(provider.BaseURL, "{{") && strings.Contains(provider.BaseURL, "}}") {
// Extract the template variable and default value
start := strings.Index(provider.BaseURL, "{{")
end := strings.Index(provider.BaseURL, "}}") + 2
template := provider.BaseURL[start:end]
// Parse the template to get variable name and default value
inner := template[2 : len(template)-2] // Remove {{ and }}
parts := strings.Split(inner, "=")
if len(parts) == 2 {
varName := strings.TrimSpace(parts[0])
defaultValue := strings.TrimSpace(parts[1])
// Create environment variable name
envVarName := strings.ToUpper(provider.Name) + "_" + varName
// Get value from environment or use default
envValue := os.Getenv(envVarName)
if envValue == "" {
envValue = defaultValue
}
// Replace the template with the actual value
provider.BaseURL = strings.Replace(provider.BaseURL, template, envValue, 1)
}
}
return provider, found
}
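
GetProviderByName expands a {{VAR=default}} placeholder in the BaseURL from an environment variable named <PROVIDERNAME>_<VAR>, falling back to the default when the variable is unset. A short sketch against the Langdock entry above (the fmt calls are for illustration only):

os.Setenv("LANGDOCK_REGION", "eu")
cfg, ok := GetProviderByName("Langdock")
fmt.Println(ok, cfg.BaseURL) // true https://api.langdock.com/openai/eu/v1

os.Unsetenv("LANGDOCK_REGION")
cfg, _ = GetProviderByName("Langdock")
fmt.Println(cfg.BaseURL) // https://api.langdock.com/openai/us/v1 (default region)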

View File

@@ -0,0 +1,246 @@
package perplexity
import (
"context"
"fmt"
"os"
"sync" // Added sync package
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
perplexity "github.com/sgaunet/perplexity-go/v2"
"github.com/danielmiessler/fabric/chat"
)
const (
providerName = "Perplexity"
)
var models = []string{
"r1-1776", "sonar", "sonar-pro", "sonar-reasoning", "sonar-reasoning-pro",
}
type Client struct {
*plugins.PluginBase
APIKey *plugins.SetupQuestion
client *perplexity.Client
}
func NewClient() *Client {
c := &Client{}
c.PluginBase = &plugins.PluginBase{
Name: providerName,
EnvNamePrefix: plugins.BuildEnvVariablePrefix(providerName),
ConfigureCustom: c.Configure, // Assign the Configure method
}
c.APIKey = c.AddSetupQuestion("API_KEY", true)
return c
}
func (c *Client) Configure() error {
// The PluginBase.Configure() is called by the framework if needed.
// We only need to handle specific logic for this plugin.
if c.APIKey.Value == "" {
// Attempt to get from environment variable if not set by user during setup
envKey := c.EnvNamePrefix + "API_KEY"
apiKeyFromEnv := os.Getenv(envKey)
if apiKeyFromEnv != "" {
c.APIKey.Value = apiKeyFromEnv
} else {
return fmt.Errorf("%s API key not configured. Please set the %s environment variable or run 'fabric --setup %s'", providerName, envKey, providerName)
}
}
c.client = perplexity.NewClient(c.APIKey.Value)
return nil
}
func (c *Client) ListModels() ([]string, error) {
// Perplexity API does not have a ListModels endpoint.
// We return a predefined list.
return models, nil
}
func (c *Client) Send(ctx context.Context, msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions) (string, error) {
if c.client == nil {
if err := c.Configure(); err != nil {
return "", fmt.Errorf("failed to configure Perplexity client: %w", err)
}
}
var perplexityMessages []perplexity.Message
for _, msg := range msgs {
perplexityMessages = append(perplexityMessages, perplexity.Message{
Role: msg.Role,
Content: msg.Content,
})
}
requestOptions := []perplexity.CompletionRequestOption{
perplexity.WithModel(opts.Model),
perplexity.WithMessages(perplexityMessages),
}
if opts.MaxTokens > 0 {
requestOptions = append(requestOptions, perplexity.WithMaxTokens(opts.MaxTokens))
}
if opts.Temperature > 0 { // Perplexity default is 1.0, only set if user specifies
requestOptions = append(requestOptions, perplexity.WithTemperature(opts.Temperature))
}
if opts.TopP > 0 { // Perplexity default is not specified, typically 1.0
requestOptions = append(requestOptions, perplexity.WithTopP(opts.TopP))
}
if opts.PresencePenalty != 0 {
// Corrected: Pass float64 directly
requestOptions = append(requestOptions, perplexity.WithPresencePenalty(opts.PresencePenalty))
}
if opts.FrequencyPenalty != 0 {
// Corrected: Pass float64 directly
requestOptions = append(requestOptions, perplexity.WithFrequencyPenalty(opts.FrequencyPenalty))
}
request := perplexity.NewCompletionRequest(requestOptions...)
// Corrected: Use SendCompletionRequest method from perplexity-go library
resp, err := c.client.SendCompletionRequest(request) // Pass request directly
if err != nil {
return "", fmt.Errorf("perplexity API request failed: %w", err) // Corrected capitalization
}
content := resp.GetLastContent()
// Append citations if available
citations := resp.GetCitations()
if len(citations) > 0 {
content += "\n\n# CITATIONS\n\n"
for i, citation := range citations {
content += fmt.Sprintf("- [%d] %s\n", i+1, citation)
}
}
return content, nil
}
func (c *Client) SendStream(msgs []*chat.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) error {
if c.client == nil {
if err := c.Configure(); err != nil {
close(channel) // Ensure channel is closed on error
return fmt.Errorf("failed to configure Perplexity client: %w", err)
}
}
var perplexityMessages []perplexity.Message
for _, msg := range msgs {
perplexityMessages = append(perplexityMessages, perplexity.Message{
Role: msg.Role,
Content: msg.Content,
})
}
requestOptions := []perplexity.CompletionRequestOption{
perplexity.WithModel(opts.Model),
perplexity.WithMessages(perplexityMessages),
perplexity.WithStream(true), // Enable streaming
}
if opts.MaxTokens > 0 {
requestOptions = append(requestOptions, perplexity.WithMaxTokens(opts.MaxTokens))
}
if opts.Temperature > 0 {
requestOptions = append(requestOptions, perplexity.WithTemperature(opts.Temperature))
}
if opts.TopP > 0 {
requestOptions = append(requestOptions, perplexity.WithTopP(opts.TopP))
}
if opts.PresencePenalty != 0 {
// Corrected: Pass float64 directly
requestOptions = append(requestOptions, perplexity.WithPresencePenalty(opts.PresencePenalty))
}
if opts.FrequencyPenalty != 0 {
// Corrected: Pass float64 directly
requestOptions = append(requestOptions, perplexity.WithFrequencyPenalty(opts.FrequencyPenalty))
}
request := perplexity.NewCompletionRequest(requestOptions...)
responseChan := make(chan perplexity.CompletionResponse)
var wg sync.WaitGroup // Use sync.WaitGroup
wg.Add(1)
go func() {
err := c.client.SendSSEHTTPRequest(&wg, request, responseChan)
if err != nil {
// Log error, can't send to string channel directly.
// Consider a mechanism to propagate this error if needed.
fmt.Fprintf(os.Stderr, "perplexity streaming error: %v\n", err) // Corrected capitalization

// If the error occurs during stream setup, the channel might not have been closed by the receiver loop.
// However, closing it here might cause a panic if the receiver loop also tries to close it.
// close(channel) // Caution: Uncommenting this may cause panic, as channel is closed in the receiver goroutine.
}
}()
go func() {
defer close(channel) // Ensure the output channel is closed when this goroutine finishes
var lastResponse *perplexity.CompletionResponse
for resp := range responseChan {
lastResponse = &resp
if len(resp.Choices) > 0 {
content := ""
// Corrected: Check Delta.Content and Message.Content directly for non-emptiness
// as Delta and Message are structs, not pointers, in perplexity.Choice
if resp.Choices[0].Delta.Content != "" {
content = resp.Choices[0].Delta.Content
} else if resp.Choices[0].Message.Content != "" {
content = resp.Choices[0].Message.Content
}
if content != "" {
channel <- content
}
}
}
// Send citations at the end if available
if lastResponse != nil {
citations := lastResponse.GetCitations()
if len(citations) > 0 {
channel <- "\n\n# CITATIONS\n\n"
for i, citation := range citations {
channel <- fmt.Sprintf("- [%d] %s\n", i+1, citation)
}
}
}
}()
return nil
}
func (c *Client) NeedsRawMode(modelName string) bool {
return true
}
// Setup is called by the fabric CLI framework to guide the user through configuration.
func (c *Client) Setup() error {
return c.PluginBase.Setup()
}
// GetName returns the name of the plugin.
func (c *Client) GetName() string {
return c.PluginBase.Name
}
// GetEnvNamePrefix returns the environment variable prefix for the plugin.
// Corrected: Receiver name
func (c *Client) GetEnvNamePrefix() string {
return c.PluginBase.EnvNamePrefix
}
// AddSetupQuestion adds a setup question to the plugin.
// This is a helper method, usually called from NewClient.
func (c *Client) AddSetupQuestion(text string, isSensitive bool) *plugins.SetupQuestion {
return c.PluginBase.AddSetupQuestion(text, isSensitive)
}
// GetSetupQuestions returns the setup questions for the plugin.
// Corrected: Return the slice of setup questions from PluginBase
func (c *Client) GetSetupQuestions() []*plugins.SetupQuestion {
return c.PluginBase.SetupQuestions
}
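
A hedged usage sketch for this client: Configure reads the key from the environment, and since the exact variable name comes from plugins.BuildEnvVariablePrefix, PERPLEXITY_API_KEY is an assumption here.

// Sketch only: assumes PERPLEXITY_API_KEY (assumed name) is set.
c := NewClient()
if err := c.Configure(); err != nil {
	log.Fatal(err)
}
msgs := []*chat.ChatCompletionMessage{{Role: "user", Content: "Name one Go proverb."}}
out, err := c.Send(context.Background(), msgs, &common.ChatOptions{Model: "sonar"})
if err != nil {
	log.Fatal(err)
}
fmt.Println(out) // reply text, plus a "# CITATIONS" section when citations exist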

View File

@@ -3,8 +3,8 @@ package ai
import (
"context"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/plugins"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/common"
)
@@ -12,6 +12,7 @@ import (
type Vendor interface {
plugins.Plugin
ListModels() ([]string, error)
SendStream([]*goopenai.ChatCompletionMessage, *common.ChatOptions, chan string) error
Send(context.Context, []*goopenai.ChatCompletionMessage, *common.ChatOptions) (string, error)
SendStream([]*chat.ChatCompletionMessage, *common.ChatOptions, chan string) error
Send(context.Context, []*chat.ChatCompletionMessage, *common.ChatOptions) (string, error)
NeedsRawMode(modelName string) bool
}

View File

@@ -9,5 +9,5 @@ type Storage[T any] interface {
Rename(oldName, newName string) (err error)
Save(name string, content []byte) (err error)
Load(name string) (ret []byte, err error)
ListNames() (err error)
ListNames(shellCompleteList bool) (err error)
}

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"strings"
"time"
"github.com/joho/godotenv"
@@ -19,6 +20,7 @@ func NewDb(dir string) (db *Db) {
StorageEntity: &StorageEntity{Label: "Patterns", Dir: db.FilePath("patterns"), ItemIsDir: true},
SystemPatternFile: "system.md",
UniquePatternsFilePath: db.FilePath("unique_patterns.txt"),
CustomPatternsDir: "", // Will be set after loading .env file
}
db.Sessions = &SessionsEntity{
@@ -49,6 +51,18 @@ func (o *Db) Configure() (err error) {
return
}
// Set custom patterns directory after loading .env file
customPatternsDir := os.Getenv("CUSTOM_PATTERNS_DIRECTORY")
if customPatternsDir != "" {
// Expand home directory if needed
if strings.HasPrefix(customPatternsDir, "~/") {
if homeDir, err := os.UserHomeDir(); err == nil {
customPatternsDir = filepath.Join(homeDir, customPatternsDir[2:])
}
}
o.Patterns.CustomPatternsDir = customPatternsDir
}
if err = o.Patterns.Configure(); err != nil {
return
}
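
The expansion above rewrites only a leading ~/ using the current user's home directory; anything else passes through untouched. The same transformation as a standalone program:

package main

import (
	"fmt"
	"os"
	"path/filepath"
	"strings"
)

func expandHome(p string) string {
	if strings.HasPrefix(p, "~/") {
		if home, err := os.UserHomeDir(); err == nil {
			return filepath.Join(home, p[2:])
		}
	}
	return p // absolute paths and "~user/..." forms pass through unchanged
}

func main() {
	fmt.Println(expandHome("~/my-patterns")) // e.g. /home/me/my-patterns
}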

View File

@@ -4,6 +4,7 @@ import (
"fmt"
"os"
"path/filepath"
"sort"
"strings"
"github.com/danielmiessler/fabric/common"
@@ -16,6 +17,7 @@ type PatternsEntity struct {
*StorageEntity
SystemPatternFile string
UniquePatternsFilePath string
CustomPatternsDir string
}
// Pattern represents a single pattern with its metadata
@@ -43,7 +45,7 @@ func (o *PatternsEntity) GetApplyVariables(
}
// Use the resolved absolute path to get the pattern
pattern, err = o.getFromFile(absPath)
pattern, _ = o.getFromFile(absPath)
} else {
// Otherwise, get the pattern from the database
pattern, err = o.getFromDB(source)
@@ -89,6 +91,19 @@ func (o *PatternsEntity) applyVariables(
// retrieves a pattern from the database by name
func (o *PatternsEntity) getFromDB(name string) (ret *Pattern, err error) {
// First check custom patterns directory if it exists
if o.CustomPatternsDir != "" {
customPatternPath := filepath.Join(o.CustomPatternsDir, name, o.SystemPatternFile)
if pattern, customErr := os.ReadFile(customPatternPath); customErr == nil {
ret = &Pattern{
Name: name,
Pattern: string(pattern),
}
return ret, nil
}
}
// Fallback to main patterns directory
patternPath := filepath.Join(o.Dir, name, o.SystemPatternFile)
var pattern []byte
@@ -145,8 +160,84 @@ func (o *PatternsEntity) getFromFile(pathStr string) (pattern *Pattern, err erro
return
}
// GetNames overrides StorageEntity.GetNames to include custom patterns directory
func (o *PatternsEntity) GetNames() (ret []string, err error) {
// Get names from main patterns directory
mainNames, err := o.StorageEntity.GetNames()
if err != nil {
return nil, err
}
// Create a map to track unique pattern names (custom patterns override main ones)
nameMap := make(map[string]bool)
for _, name := range mainNames {
nameMap[name] = true
}
// Get names from custom patterns directory if it exists
if o.CustomPatternsDir != "" {
// Create a temporary StorageEntity for the custom directory
customStorage := &StorageEntity{
Dir: o.CustomPatternsDir,
ItemIsDir: o.StorageEntity.ItemIsDir,
FileExtension: o.StorageEntity.FileExtension,
}
customNames, customErr := customStorage.GetNames()
if customErr == nil {
// Add custom patterns, they will override main patterns with same name
for _, name := range customNames {
nameMap[name] = true
}
}
// Ignore errors from custom directory (it might not exist)
}
// Convert map keys back to slice
ret = make([]string, 0, len(nameMap))
for name := range nameMap {
ret = append(ret, name)
}
// Sort the patterns alphabetically
sort.Strings(ret)
return ret, nil
}
// ListNames overrides StorageEntity.ListNames to use PatternsEntity.GetNames
func (o *PatternsEntity) ListNames(shellCompleteList bool) (err error) {
var names []string
if names, err = o.GetNames(); err != nil {
return
}
if len(names) == 0 {
if !shellCompleteList {
fmt.Printf("\nNo %v\n", o.StorageEntity.Label)
}
return
}
for _, item := range names {
fmt.Printf("%s\n", item)
}
return
}
// Get required for Storage interface
func (o *PatternsEntity) Get(name string) (*Pattern, error) {
// Use GetPattern with no variables
return o.GetApplyVariables(name, nil, "")
}
func (o *PatternsEntity) Save(name string, content []byte) (err error) {
patternDir := filepath.Join(o.Dir, name)
if err = os.MkdirAll(patternDir, os.ModePerm); err != nil {
return fmt.Errorf("could not create pattern directory: %v", err)
}
patternPath := filepath.Join(patternDir, o.SystemPatternFile)
if err = os.WriteFile(patternPath, content, 0644); err != nil {
return fmt.Errorf("could not save pattern: %v", err)
}
return nil
}
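
With CustomPatternsDir set, getFromDB gives custom patterns precedence over the main store, which is what lets a user shadow a built-in pattern. A brief sketch of the resolution order (paths are illustrative):

entity.CustomPatternsDir = "/home/me/custom-patterns" // illustrative path
// getFromDB("summarize") now tries, in order:
//   /home/me/custom-patterns/summarize/system.md   (custom wins)
//   <main patterns dir>/summarize/system.md        (fallback)
if p, err := entity.getFromDB("summarize"); err == nil {
	fmt.Println(p.Pattern)
}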

View File

@@ -144,3 +144,141 @@ func TestGetApplyVariables(t *testing.T) {
})
}
}
func TestPatternsEntity_Save(t *testing.T) {
entity, cleanup := setupTestPatternsEntity(t)
defer cleanup()
name := "new-pattern"
content := []byte("test pattern content")
require.NoError(t, entity.Save(name, content))
patternDir := filepath.Join(entity.Dir, name)
info, err := os.Stat(patternDir)
require.NoError(t, err)
assert.True(t, info.IsDir())
data, err := os.ReadFile(filepath.Join(patternDir, entity.SystemPatternFile))
require.NoError(t, err)
assert.Equal(t, content, data)
}
func TestPatternsEntity_CustomPatterns(t *testing.T) {
// Create main patterns directory
mainDir, err := os.MkdirTemp("", "test-main-patterns-*")
require.NoError(t, err)
defer os.RemoveAll(mainDir)
// Create custom patterns directory
customDir, err := os.MkdirTemp("", "test-custom-patterns-*")
require.NoError(t, err)
defer os.RemoveAll(customDir)
entity := &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: mainDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
CustomPatternsDir: customDir,
}
// Create a pattern in main directory
createTestPattern(t, &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: mainDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
}, "main-pattern", "Main pattern content")
// Create a pattern in custom directory
createTestPattern(t, &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: customDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
}, "custom-pattern", "Custom pattern content")
// Create a pattern with same name in both directories (custom should override)
createTestPattern(t, &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: mainDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
}, "shared-pattern", "Main shared pattern")
createTestPattern(t, &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: customDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
}, "shared-pattern", "Custom shared pattern")
// Test GetNames includes both directories
names, err := entity.GetNames()
require.NoError(t, err)
assert.Contains(t, names, "main-pattern")
assert.Contains(t, names, "custom-pattern")
assert.Contains(t, names, "shared-pattern")
// Test that custom pattern overrides main pattern
pattern, err := entity.getFromDB("shared-pattern")
require.NoError(t, err)
assert.Equal(t, "Custom shared pattern", pattern.Pattern)
// Test that main pattern is accessible when not overridden
pattern, err = entity.getFromDB("main-pattern")
require.NoError(t, err)
assert.Equal(t, "Main pattern content", pattern.Pattern)
// Test that custom pattern is accessible
pattern, err = entity.getFromDB("custom-pattern")
require.NoError(t, err)
assert.Equal(t, "Custom pattern content", pattern.Pattern)
}
func TestPatternsEntity_CustomPatternsEmpty(t *testing.T) {
// Test behavior when custom patterns directory is empty or doesn't exist
mainDir, err := os.MkdirTemp("", "test-main-patterns-*")
require.NoError(t, err)
defer os.RemoveAll(mainDir)
entity := &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: mainDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
CustomPatternsDir: "/nonexistent/directory",
}
// Create a pattern in main directory
createTestPattern(t, &PatternsEntity{
StorageEntity: &StorageEntity{
Dir: mainDir,
Label: "patterns",
ItemIsDir: true,
},
SystemPatternFile: "system.md",
}, "main-pattern", "Main pattern content")
// Test GetNames works even with nonexistent custom directory
names, err := entity.GetNames()
require.NoError(t, err)
assert.Contains(t, names, "main-pattern")
// Test that main pattern is accessible
pattern, err := entity.getFromDB("main-pattern")
require.NoError(t, err)
assert.Equal(t, "Main pattern content", pattern.Pattern)
}

View File

@@ -3,8 +3,8 @@ package fsdb
import (
"fmt"
"github.com/danielmiessler/fabric/chat"
"github.com/danielmiessler/fabric/common"
goopenai "github.com/sashabaranov/go-openai"
)
type SessionsEntity struct {
@@ -38,16 +38,16 @@ func (o *SessionsEntity) SaveSession(session *Session) (err error) {
type Session struct {
Name string
Messages []*goopenai.ChatCompletionMessage
Messages []*chat.ChatCompletionMessage
vendorMessages []*goopenai.ChatCompletionMessage
vendorMessages []*chat.ChatCompletionMessage
}
func (o *Session) IsEmpty() bool {
return len(o.Messages) == 0
}
func (o *Session) Append(messages ...*goopenai.ChatCompletionMessage) {
func (o *Session) Append(messages ...*chat.ChatCompletionMessage) {
if o.vendorMessages != nil {
for _, message := range messages {
o.Messages = append(o.Messages, message)
@@ -58,7 +58,7 @@ func (o *Session) Append(messages ...*goopenai.ChatCompletionMessage) {
}
}
func (o *Session) GetVendorMessages() (ret []*goopenai.ChatCompletionMessage) {
func (o *Session) GetVendorMessages() (ret []*chat.ChatCompletionMessage) {
if len(o.vendorMessages) == 0 {
for _, message := range o.Messages {
o.appendVendorMessage(message)
@@ -68,13 +68,13 @@ func (o *Session) GetVendorMessages() (ret []*goopenai.ChatCompletionMessage) {
return
}
func (o *Session) appendVendorMessage(message *goopenai.ChatCompletionMessage) {
func (o *Session) appendVendorMessage(message *chat.ChatCompletionMessage) {
if message.Role != common.ChatMessageRoleMeta {
o.vendorMessages = append(o.vendorMessages, message)
}
}
func (o *Session) GetLastMessage() (ret *goopenai.ChatCompletionMessage) {
func (o *Session) GetLastMessage() (ret *chat.ChatCompletionMessage) {
if len(o.Messages) > 0 {
ret = o.Messages[len(o.Messages)-1]
}
@@ -86,9 +86,9 @@ func (o *Session) String() (ret string) {
ret += fmt.Sprintf("\n--- \n[%v]\n%v", message.Role, message.Content)
if message.MultiContent != nil {
for _, part := range message.MultiContent {
if part.Type == goopenai.ChatMessagePartTypeImageURL {
if part.Type == chat.ChatMessagePartTypeImageURL {
ret += fmt.Sprintf("\n%v: %v", part.Type, *part.ImageURL)
} else if part.Type == goopenai.ChatMessagePartTypeText {
} else if part.Type == chat.ChatMessagePartTypeText {
ret += fmt.Sprintf("\n%v: %v", part.Type, part.Text)
}
}

View File

@@ -3,7 +3,7 @@ package fsdb
import (
"testing"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/chat"
)
func TestSessions_GetOrCreateSession(t *testing.T) {
@@ -27,7 +27,7 @@ func TestSessions_SaveSession(t *testing.T) {
StorageEntity: &StorageEntity{Dir: dir, FileExtension: ".json"},
}
sessionName := "testSession"
session := &Session{Name: sessionName, Messages: []*goopenai.ChatCompletionMessage{{Content: "message1"}}}
session := &Session{Name: sessionName, Messages: []*chat.ChatCompletionMessage{{Content: "message1"}}}
err := sessions.SaveSession(session)
if err != nil {
t.Fatalf("failed to save session: %v", err)

View File

@@ -100,14 +100,16 @@ func (o *StorageEntity) Load(name string) (ret []byte, err error) {
return
}
func (o *StorageEntity) ListNames() (err error) {
func (o *StorageEntity) ListNames(shellCompleteList bool) (err error) {
var names []string
if names, err = o.GetNames(); err != nil {
return
}
if len(names) == 0 {
fmt.Printf("\nNo %v\n", o.Label)
if !shellCompleteList {
fmt.Printf("\nNo %v\n", o.Label)
}
return
}

View File

@@ -8,6 +8,7 @@ import (
)
const AnswerReset = "reset"
const SettingTypeBool = "bool"
type Plugin interface {
GetName() string
@@ -60,6 +61,21 @@ func (o *PluginBase) AddSetupQuestionCustom(name string, required bool, question
return
}
func (o *PluginBase) AddSetupQuestionBool(name string, required bool) (ret *SetupQuestion) {
return o.AddSetupQuestionCustomBool(name, required, "")
}
func (o *PluginBase) AddSetupQuestionCustomBool(name string, required bool, question string) (ret *SetupQuestion) {
setting := o.AddSetting(name, required)
setting.Type = SettingTypeBool
ret = &SetupQuestion{Setting: setting, Question: question}
if ret.Question == "" {
ret.Question = fmt.Sprintf("Enable %v %v (true/false)", o.Name, strings.ToUpper(name))
}
o.SetupQuestions = append(o.SetupQuestions, ret)
return
}
func (o *PluginBase) Configure() (err error) {
if err = o.Settings.Configure(); err != nil {
return
@@ -98,16 +114,122 @@ func NewSetting(envVariable string, required bool) *Setting {
}
}
// In plugins/plugin.go
type Setting struct {
EnvVariable string
Value string
Required bool
Type string // "string" (default), "bool"
}
func (o *Setting) IsValid() bool {
if o.Type == SettingTypeBool {
_, err := ParseBool(o.Value)
return (err == nil) || !o.Required
}
return o.IsDefined() || !o.Required
}
func (o *Setting) Print() {
if o.Type == SettingTypeBool {
v, _ := ParseBool(o.Value)
fmt.Printf("%v: %v\n", o.EnvVariable, v)
} else {
fmt.Printf("%v: %v\n", o.EnvVariable, o.Value)
}
}
func (o *Setting) FillEnvFileContent(buffer *bytes.Buffer) {
if o.IsDefined() {
buffer.WriteString(o.EnvVariable)
buffer.WriteString("=")
if o.Type == SettingTypeBool {
v, _ := ParseBool(o.Value)
buffer.WriteString(fmt.Sprintf("%v", v))
} else {
buffer.WriteString(o.Value)
}
buffer.WriteString("\n")
}
}
func ParseBoolElseFalse(val string) (ret bool) {
ret, _ = ParseBool(val)
return
}
func ParseBool(val string) (bool, error) {
switch strings.ToLower(strings.TrimSpace(val)) {
case "1", "true", "yes", "on":
return true, nil
case "0", "false", "no", "off":
return false, nil
}
return false, fmt.Errorf("invalid bool: %q", val)
}
type SetupQuestion struct {
*Setting
Question string
}
func (o *SetupQuestion) Ask(label string) (err error) {
var prefix string
if label != "" {
prefix = fmt.Sprintf("[%v] ", label)
} else {
prefix = ""
}
fmt.Println()
if o.Type == SettingTypeBool {
current := "false"
if v, err := ParseBool(o.Value); err == nil && v {
current = "true"
}
fmt.Printf("%v%v (true/false, leave empty for '%s' or type '%v' to remove the value):\n",
prefix, o.Question, current, AnswerReset)
} else if o.Value != "" {
fmt.Printf("%v%v (leave empty for '%s' or type '%v' to remove the value):\n",
prefix, o.Question, o.Value, AnswerReset)
} else {
fmt.Printf("%v%v (leave empty to skip):\n", prefix, o.Question)
}
var answer string
fmt.Scanln(&answer)
answer = strings.TrimRight(answer, "\n")
if answer == "" {
answer = o.Value
} else if strings.ToLower(answer) == AnswerReset {
answer = ""
}
err = o.OnAnswer(answer)
return
}
func (o *SetupQuestion) OnAnswer(answer string) (err error) {
if o.Type == SettingTypeBool {
if answer == "" {
o.Value = ""
} else {
_, err := ParseBool(answer)
if err != nil {
return fmt.Errorf("invalid boolean value: %v", answer)
}
o.Value = strings.ToLower(answer)
}
} else {
o.Value = answer
}
if o.EnvVariable != "" {
if err = os.Setenv(o.EnvVariable, o.Value); err != nil {
return
}
}
err = o.IsValidErr()
return
}
func (o *Setting) IsValidErr() (err error) {
if !o.IsValid() {
err = fmt.Errorf("%v=%v, is not valid", o.EnvVariable, o.Value)
@@ -127,71 +249,10 @@ func (o *Setting) Configure() error {
return o.IsValidErr()
}
func (o *Setting) FillEnvFileContent(buffer *bytes.Buffer) {
if o.IsDefined() {
buffer.WriteString(o.EnvVariable)
buffer.WriteString("=")
//buffer.WriteString("\"")
buffer.WriteString(o.Value)
//buffer.WriteString("\"")
buffer.WriteString("\n")
}
return
}
func (o *Setting) Print() {
fmt.Printf("%v: %v\n", o.EnvVariable, o.Value)
}
func NewSetupQuestion(question string) *SetupQuestion {
return &SetupQuestion{Setting: &Setting{}, Question: question}
}
type SetupQuestion struct {
*Setting
Question string
}
func (o *SetupQuestion) Ask(label string) (err error) {
var prefix string
if label != "" {
prefix = fmt.Sprintf("[%v] ", label)
} else {
prefix = ""
}
fmt.Println()
if o.Value != "" {
fmt.Printf("%v%v (leave empty for '%s' or type '%v' to remove the value):\n",
prefix, o.Question, o.Value, AnswerReset)
} else {
fmt.Printf("%v%v (leave empty to skip):\n", prefix, o.Question)
}
var answer string
fmt.Scanln(&answer)
answer = strings.TrimRight(answer, "\n")
if answer == "" {
answer = o.Value
} else if strings.ToLower(answer) == AnswerReset {
answer = ""
}
err = o.OnAnswer(answer)
return
}
func (o *SetupQuestion) OnAnswer(answer string) (err error) {
o.Value = answer
if o.EnvVariable != "" {
if err = os.Setenv(o.EnvVariable, answer); err != nil {
return
}
}
err = o.IsValidErr()
return
}
type Settings []*Setting
func (o Settings) IsConfigured() (ret bool) {
@@ -217,7 +278,6 @@ func (o Settings) FillEnvFileContent(buffer *bytes.Buffer) {
for _, setting := range o {
setting.FillEnvFileContent(buffer)
}
return
}
type SetupQuestions []*SetupQuestion
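
ParseBool accepts several spellings on either side and reports anything else as an error instead of silently returning false; bool-typed setup questions build on it. A quick sketch, where pluginBase stands in for any *PluginBase and the setting name is illustrative:

fmt.Println(ParseBool("Yes"))          // true <nil>
fmt.Println(ParseBoolElseFalse("off")) // false, error discarded
if _, err := ParseBool("maybe"); err != nil {
	fmt.Println(err) // invalid bool: "maybe"
}
q := pluginBase.AddSetupQuestionBool("USE_OAUTH", false) // name illustrative
fmt.Println(q.Question) // Enable <plugin name> USE_OAUTH (true/false)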

View File

@@ -197,12 +197,13 @@ func LoadStrategy(filename string) (*Strategy, error) {
}
// ListStrategies prints available strategies
func (sm *StrategiesManager) ListStrategies() error {
func (sm *StrategiesManager) ListStrategies(shellCompleteList bool) error {
if len(sm.Strategies) == 0 {
return fmt.Errorf("no strategies found. Please run 'fabric --setup' to download strategies")
}
fmt.Print("Available Strategies:\n\n")
if !shellCompleteList {
fmt.Print("Available Strategies:\n\n")
}
// Get all strategy names for sorting
names := []string{}
for name := range sm.Strategies {
@@ -224,7 +225,11 @@ func (sm *StrategiesManager) ListStrategies() error {
formatString := "%-" + fmt.Sprintf("%d", maxNameLength+2) + "s %s\n"
for _, name := range names {
strategy := sm.Strategies[name]
fmt.Printf(formatString, strategy.Name, strategy.Description)
if shellCompleteList {
fmt.Printf("%s\n", strategy.Name)
} else {
fmt.Printf(formatString, strategy.Name, strategy.Description)
}
}
return nil
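
The shellCompleteList flag lets the same method serve humans and shell completion: the former gets the aligned name/description table, the latter bare names only. A hedged sketch of the two modes (strategy names are illustrative):

sm.ListStrategies(false)
// Available Strategies:
//
// cot    Chain-of-thought prompting
// tot    Tree-of-thought prompting

sm.ListStrategies(true)
// cot
// tot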

View File

@@ -117,8 +117,12 @@ func (e *ExtensionExecutor) executeWithFile(cmd *exec.Cmd, ext *ExtensionDefinit
// Create context with timeout
ctx, cancel := context.WithTimeout(context.Background(), timeout)
defer cancel()
// Store the original environment
originalEnv := cmd.Env
// Create a new command with context. This might reset Env, depending on the Go version.
cmd = exec.CommandContext(ctx, cmd.Path, cmd.Args[1:]...)
cmd.Env = cmd.Env
// Restore the environment variables explicitly
cmd.Env = originalEnv
fileConfig := ext.GetFileConfig()
if fileConfig == nil {
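
The restored-environment fix matters because exec.CommandContext constructs a brand-new exec.Cmd, so an Env configured on the original value is lost unless it is copied across explicitly. A minimal standalone illustration of the pattern:

package main

import (
	"context"
	"os/exec"
	"time"
)

func main() {
	cmd := exec.Command("env")
	cmd.Env = []string{"EXT_VAR=1"} // custom environment for the child process

	ctx, cancel := context.WithTimeout(context.Background(), 5*time.Second)
	defer cancel()

	saved := cmd.Env                                          // stash before rebuilding
	cmd = exec.CommandContext(ctx, cmd.Path, cmd.Args[1:]...) // fresh Cmd, Env is nil again
	cmd.Env = saved                                           // restore explicitly

	_ = cmd.Run() // runs `env` with only EXT_VAR=1 set
}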

Some files were not shown because too many files have changed in this diff.