Compare commits

...

146 Commits

Author SHA1 Message Date
github-actions[bot]
b26d466394 Update version to v1.4.136 and commit 2025-02-24 20:21:43 +00:00
Eugen Eisler
12603b619b Update to upload-artifact@v4 because upload-artifact@v3 is deprecated 2025-02-24 21:20:51 +01:00
github-actions[bot]
cbc82ec045 Update version to v1.4.135 and commit 2025-02-24 20:10:17 +00:00
Eugen Eisler
c8af946c87 Merge pull request #1309 from jmd1010/feature/web-ui-enhancements
Feature/Web Svelte GUI Enhancements: Pattern Descriptions, Tags, Favorites, Search Bar, Language Integration, PDF file conversion, etc
2025-02-24 21:09:23 +01:00
Eugen Eisler
bc216fdfef Merge pull request #1312 from junaid18183/main
Added Create LOE Document Prompt
2025-02-24 20:59:43 +01:00
Eugen Eisler
8befac61af Merge pull request #1302 from verebes1/feat/add-lmstudio
feat: Add LM Studio compatibility
2025-02-24 20:58:36 +01:00
Juned Memon
31be01f3b3 Added create_loe_document prompt 2025-02-21 19:35:28 +05:30
Juned Memon
52e2995c55 Added create_loe_document prompt 2025-02-21 19:26:36 +05:30
jmd1010
f314671f65 Exclude static PNG files from PR 2025-02-20 15:18:32 -05:00
jmd1010
292fd75699 Remove PNG files from PR scope 2025-02-20 15:02:39 -05:00
jmd1010
0a07072be0 Enhance pattern handling and chat interface improvements 2025-02-20 13:27:31 -05:00
jmd1010
5d31e90650 Update .gitignore to exclude sensitive and generated files 2025-02-19 22:18:41 -05:00
jmd1010
8bff9764f8 Remove sensitive and generated files from tracking 2025-02-19 22:17:20 -05:00
jmd1010
40c4cb46be Remove personal development notes from tracking 2025-02-19 21:55:00 -05:00
jmd1010
8a0f9814e6 Development checkpoint - Web UI enhancements with complete directory structure 2025-02-19 21:47:10 -05:00
jmd1010
717eb585b5 Setup backup configuration and update dependencies 2025-02-18 14:10:19 -05:00
JM
e10a2c9b09 Update ENV 2025-02-18 11:27:14 -05:00
David
c6ebfd3ad7 feat: Add LM Studio compatibility
- Added LM Studio as a new plugin; it can now be used with Fabric.
- Updated the plugin registry with the new plugin name
- Updated the configuration with the required base url
2025-02-17 23:17:44 +00:00
Eugen Eisler
0369087b91 Merge pull request #1297 from Perchycs/patch-1
Create pattern_explanations.md
2025-02-14 08:52:40 +01:00
Daniel Miessler
d8a415698c updated extract domains 2025-02-13 18:36:46 -08:00
Daniel Miessler
2bfb087b55 updated extract domains 2025-02-13 18:33:30 -08:00
Daniel Miessler
8782f78178 Added extract_domains 2025-02-13 18:26:36 -08:00
Perchycs
90c4f244ae Create pattern_explanations.md
Spent way too long getting chatgpt to give a one-line summary, based on the contents of each prompt.
2025-02-12 18:36:05 +13:00
github-actions[bot]
2331d011c1 Update version to v1.4.134 and commit 2025-02-11 22:23:42 +00:00
Eugen Eisler
2568204395 Merge pull request #1289 from thevops/pull-transcript-with-timestamps
Add the ability to grab YouTube video transcript with timestamps
2025-02-11 23:22:51 +01:00
github-actions[bot]
eb56ead927 Update version to v1.4.133 and commit 2025-02-11 22:22:36 +00:00
Eugen Eisler
8e10a72f1d Merge pull request #1294 from TvisharajiK/main
Improved unit-test coverage from 0 to 100% (AI module) using Keploy's agent
2025-02-11 23:21:12 +01:00
Tvisha
5246a9699a Merge pull request #1 from TvisharajiK/unittests
Improved unit-test coverage from 0 to 100% using Keploy AI Agent
2025-02-11 02:54:52 +05:30
TvisharajiK
7a678dc175 Feat: Increase unit test coverage from 0 to 100% in the AI module using Keploy's Agent 2025-02-11 02:53:36 +05:30
Daniel Miessler
b2e2784cf4 Added h3 TELOS pattern. 2025-02-07 11:57:49 -08:00
Daniel Miessler
111e8c786a Added challenge handling pattern. 2025-02-07 08:13:52 -08:00
Daniel Miessler
b8b9cdfdae Added year in review pattern. 2025-02-07 08:08:35 -08:00
Daniel Miessler
bfcbe6f06a Adding more TELOS patterns. 2025-02-07 07:36:25 -08:00
Daniel Miessler
02c28ad8b8 Added additional Telos patterns. 2025-02-07 07:11:26 -08:00
Krzysztof Łuczak
f3a1982e30 Add the ability to grab YouTube video transcript with timestamps
This commit adds the ability to grab the transcript
of a YouTube video with timestamps. The timestamps
are formatted as HH:MM:SS and are prepended to
each line of the transcript. The feature is enabled
by the new `--transcript-with-timestamps` flag,
so it's similar to the existing `--transcript` flag.

Example future use-case:

Providing summary of a video that includes timestamps
for quick navigation to specific parts of the video.
2025-02-07 15:25:22 +01:00
Daniel Miessler
c4b629fe03 Updated panel topic extractor 2025-02-06 22:19:51 -08:00
Daniel Miessler
f962104a2d Added panel topic extractor 2025-02-06 22:16:25 -08:00
Daniel Miessler
cf32bdc012 Added intro sentences pattern 2025-02-06 22:13:45 -08:00
Daniel Miessler
1ccbb22866 Updated announcement at the top 2025-02-06 14:50:01 -08:00
github-actions[bot]
d5a2008c44 Update version to v1.4.132 and commit 2025-02-02 13:50:30 +00:00
Eugen Eisler
ff33c33ea5 Merge pull request #1278 from aicharles/feat/anthropic-plugin-update
feat(anthropic): enable custom API base URL support
2025-02-02 14:49:37 +01:00
aicharles
731ecc6b3c feat(anthropic): enable custom API base URL support
- Enable and improve custom API base URL configuration
- Add proper handling of v1 endpoint for UUID-containing URLs
- Implement URL formatting logic for consistent endpoint structure
- Clean up commented code and improve configuration flow
2025-01-30 08:48:38 -06:00
github-actions[bot]
31df56add8 Update version to v1.4.131 and commit 2025-01-30 00:50:21 +00:00
Eugen Eisler
0f8a403dba Merge pull request #1270 from wmahfoudh/adding-output-filename-support-to-to_pdf
Added output filename support for to_pdf
2025-01-30 01:49:25 +01:00
Eugen Eisler
8b33b9946e Merge pull request #1271 from wmahfoudh/adding-deepseek-support
Adding deepseek support
2025-01-30 01:48:10 +01:00
Walid
a77efada0e feat: Added Deepseek AI integration 2025-01-23 20:50:56 +04:00
Walid
3e8aaed268 Added output filename support for to_pdf 2025-01-23 19:25:18 +04:00
Eugen Eisler
c2fad4de80 Merge pull request #1258 from tuergeist/readme-patch-1
Minor README fix and additional Example
2025-01-18 19:35:42 +01:00
Christoph Becker
e558d535df doc: Add scrape URL example. Fix Example 4 2025-01-13 14:58:39 +01:00
Christoph Becker
1c05b37c76 doc: Custom patterns also work with Claude models 2025-01-13 14:18:41 +01:00
Eugen Eisler
e46c588b9c Merge pull request #1257 from jessefmoore/main
Create analyze_threat_report_cmds
2025-01-13 13:31:52 +01:00
Eugen Eisler
3bf6b7b000 Merge pull request #1256 from JOduMonT/patch-1
Update README.md
2025-01-13 12:02:25 +01:00
Daniel Miessler
82db18a8aa Updated conversion post. 2025-01-13 00:16:13 -08:00
Daniel Miessler
5a765bd8fc Adding markdown converter. 2025-01-12 23:34:25 -08:00
Daniel Miessler
339e1e6790 Updated prediction creator. 2025-01-12 21:38:21 -08:00
Daniel Miessler
a106e6de27 Updated predictor pattern. 2025-01-12 16:37:26 -08:00
Daniel Miessler
86eddbeb0a Added new prediction generator. 2025-01-12 13:34:18 -08:00
Jesse Moore
2daf0d90ce Create system.md
Create a pattern that extracts commands from videos and threat reports, so pentesters, red teams, or threat hunters can use them to threat hunt or to simulate the threat actor.
2025-01-12 09:48:28 -08:00
Jonathan DUMONT
03dfa03f46 Update README.md
## Change
1. Windows Command: because curl is not available natively on Windows
2. Syntax: formatted this way, the commands are easier to click, copy, and paste
2025-01-12 13:55:37 +01:00
Eugen Eisler
92bbbfe88b Merge pull request #1247 from kevnk/update-suggest_pattern-user-prompt
Update suggest_pattern: refine summaries and add recently added patterns
2025-01-10 12:57:57 +01:00
Eugen Eisler
fb2dc00b9c Merge pull request #1252 from jeffmcjunkin/patch-1
Update README.md: Add PowerShell aliases
2025-01-10 12:57:00 +01:00
Eugen Eisler
0014a53c6e Merge pull request #1253 from abassel/fix/fix_few_typos
Fixed few typos that I could find
2025-01-10 12:56:25 +01:00
Alexandre Bassel
021d2738e4 Fixed few typos that I could find 2025-01-10 03:44:54 -03:00
Jeff McJunkin
f312ad0364 Update README.md: Add PowerShell aliases 2025-01-09 12:39:23 -08:00
Kevin Kirchner
02aa41e6aa Update summaries and add recently added patterns 2025-01-05 14:59:31 -06:00
github-actions[bot]
1f8039d996 Update version to v1.4.130 and commit 2025-01-03 20:53:29 +00:00
Eugen Eisler
977d902cdd Merge pull request #1240 from johnconnor-sec/main
Updates: ./web
2025-01-03 21:52:36 +01:00
John
12e4611d9a Update README 2025-01-01 04:38:59 -05:00
John
46a77de9e8 Remove inbox note 2025-01-01 03:10:35 -05:00
John Connor
87b55148fa Merge pull request #11 from danielmiessler/main
Updates from main
2025-01-01 02:54:15 -05:00
John
3931098aad Merge branch 'main' of https://github.com/johnconnor-sec/fabric 2025-01-01 02:48:25 -05:00
John
2aebc84c66 Update README 2025-01-01 02:48:15 -05:00
John
c107cce22e Updates for BUILD 2025-01-01 02:08:41 -05:00
John
71b049bffd Fixing indentation again
Removed backup side-nav and terminal.
updated toast for transcripts
2025-01-01 00:00:30 -05:00
John
d3e8ce5120 Added: Only dates are required for Posts now. 2024-12-31 23:27:19 -05:00
John
ce7fc78076 Fix: Chat.svelte indentation
Removed backup files
2024-12-31 23:19:37 -05:00
John
f911de41b5 Fix: NoteDrawer textarea sizing 2024-12-31 22:03:13 -05:00
John
7288001a01 Updated tags page to use Frontmatter instead of PostMetadata
buffer issues
2024-12-31 20:35:19 -05:00
John
7f808bcf43 Deleting old files that were moved or renamed
Folders deleted:
- `types`. The folders contained are now `lib/interfaces` and `lib/api`
- `types/markdown` now in `utils/markdown`
- `components/ui/{side-nav,terminal}` now `components/ui/toc` and
`terminal`
2024-12-31 20:09:20 -05:00
John
025dc8ed13 !NOTEDRAWER IS NOW CENTERED IN VIEWPORT! 2024-12-31 20:04:59 -05:00
John
fc0fd00e16 !!CHAT IS NOW CENTERED IN VIEWPORT!!
!Chat is now centered in the viewport!
2024-12-31 16:44:38 -05:00
John
a3da84f459 Edit: styling on Posts page 2024-12-31 16:36:56 -05:00
John
58a6f0404a Indented Toc 2024-12-31 13:13:58 -05:00
John
643403192a Added: blinking cursor to Terminal. Removed / added comments
Removed from ChatMessages
Added to api/context
2024-12-31 12:58:47 -05:00
John
416cee4f54 Update: References
Moved
- `lib/types/interfaces` to `lib/interfaces`.
- `components/ui/side-nav` to `components/ui/toc`.
- `components/ui/terminal` to `components/terminal`.
- `types/markdown` to `utils/markdown`
- `lib/types/chat` to `lib/api`
2024-12-31 12:21:40 -05:00
John
e42be19347 Edit: type/note to note
type/note was causing problems.
2024-12-31 12:15:20 -05:00
John
78bae7a6e7 House Keeping: Added missing png. Removed more unused 2024-12-30 10:13:46 -05:00
John
ec31f11abf Update README to reflect current @12-30-24 2024-12-30 10:12:33 -05:00
John
2d3ebcd09c House Keeping: Fixing Indentation 2024-12-30 09:52:07 -05:00
John
5da749f994 Update: Contact page 2024-12-30 08:54:07 -05:00
John
85891f0106 Updated copy 2024-12-30 08:33:58 -05:00
John
229287510a Indentation 2024-12-30 04:03:41 -05:00
John
d42ba42bb2 Add: Templates for posts 2024-12-30 03:59:31 -05:00
John
574bb2c450 Fix: Obsidian Card. Indented app.html 2024-12-30 02:21:55 -05:00
John
3797b7ac6a Update: removed grid from PostContent 2024-12-29 18:02:04 -05:00
John
ed7c28958f Update: ui/button component 2024-12-29 18:00:54 -05:00
John
74a134eec0 Rename chat.ts to chat-store.ts 2024-12-29 17:59:45 -05:00
John
4094296a4c Rename noteStore to note-store 2024-12-29 17:58:24 -05:00
John Connor
00a706eb36 Merge pull request #10 from danielmiessler/main
Merge Main to my fork
2024-12-29 12:16:57 -05:00
John
f8f39b92c3 Deleted lib/layouts/files. Renamed lib/store/theme
lib/layouts/files are no longer in use. Renamed lib/store/theme to
adhere to current naming convention
2024-12-28 07:10:14 -05:00
John Connor
eb8d40dfb6 Merge pull request #9 from danielmiessler/main
Another merge nightmare. This one wasn't so bad
2024-12-28 06:52:42 -05:00
John Connor
343cbba5ec Update version.nix
Update version to v..1 and commit

Update version.go

Update version to v..1 and commit

Update version.nix

Update version to v..1 and commit

Update version.go

Update version.nix
2024-12-28 06:52:42 -05:00
John Connor
ac3e0b5ba0 Merge pull request #9 from danielmiessler/main
Another merge nightmare. This one wasn't so bad
2024-12-28 06:48:01 -05:00
John Connor
55c11a3861 Update version.nix 2024-12-28 06:47:25 -05:00
John Connor
013c6cb1e5 Update version.go 2024-12-28 06:47:03 -05:00
github-actions[bot]
fc54f0e32e Update version to v..1 and commit 2024-12-28 11:45:59 +00:00
John Connor
5a63c6b260 Update version.nix 2024-12-28 06:44:51 -05:00
github-actions[bot]
157b0a6109 Update version to v..1 and commit 2024-12-28 11:44:47 +00:00
John Connor
b10455ff76 Update version.go 2024-12-28 06:43:44 -05:00
github-actions[bot]
a7b4a7160a Update version to v..1 and commit 2024-12-28 11:42:43 +00:00
John Connor
65bb9fee84 Update version.nix 2024-12-28 06:41:51 -05:00
John
b701c767fc Update: Post page styling and layout
Indented unused Search.svelte file
2024-12-28 06:14:11 -05:00
John
2a450cf1be Minor styling improvements 2024-12-28 05:42:48 -05:00
John
1f1b51edcf Remove: Docs in Posts 2024-12-28 05:33:19 -05:00
John
eae691aa8c Update: NotesDrawer now saves notes to lib/content/inbox 2024-12-27 03:57:35 -05:00
John
9d8d5ca924 WIP: Restyling Chat page 2024-12-27 02:13:49 -05:00
John
84e3ff9386 Indented Main page 2024-12-27 02:13:31 -05:00
John
002e87ffbb Moved NotesDrawer to ModelConfig component 2024-12-27 00:13:39 -05:00
John
4be9cf42b4 Deleted: Moved Components from /home to respective dirs 2024-12-26 23:32:39 -05:00
John
75aad67a22 Add: NotesDrawer to header 2024-12-26 23:31:04 -05:00
John
b8a285bbbc Removed styling from /routes/chat/*.svelte 2024-12-26 23:19:35 -05:00
John
f8f892bfe0 WIP: Notes Drawer text color 2024-12-24 07:51:10 -05:00
John
8c68ebc0ee WIP: Notes Drawer. Updated default theme to rocket 2024-12-24 07:40:29 -05:00
John
cbd2ffe81d Updated POSTS to make main 24-12-08 2024-12-24 05:18:36 -05:00
John
86b76faa5b Update imports 2024-12-23 22:34:00 -05:00
John
9b38c8d5aa Updates 2024-12-16 18:40:15 -05:00
John Connor
4c0ed0a5f0 Merge pull request #4 from johnconnor-sec/dependabot/npm_and_yarn/web/npm_and_yarn-06d0dd15d9
build(deps-dev): bump @sveltejs/kit from 2.8.4 to 2.9.0 in /web in the npm_and_yarn group across 1 directory
2024-12-07 22:04:00 -05:00
John Connor
0bc220949a Update +page.svelte 2024-12-07 19:30:15 -05:00
github-actions[bot]
5fb18077eb Update version to v..1 and commit 2024-12-08 00:15:18 +00:00
John Connor
fcf073febd Update version.nix to reflect upstream/main 2024-12-07 19:14:30 -05:00
github-actions[bot]
565fea97cf Update version to v..1 and commit 2024-12-08 00:13:55 +00:00
John Connor
daf1259556 Update version.go to reflect upstream/main 2024-12-07 19:12:50 -05:00
John
0eab786030 removed arcanum gif 2024-12-07 18:44:43 -05:00
github-actions[bot]
9dfb911d4a Update version to v..1 and commit 2024-12-07 23:32:02 +00:00
John
197f0e5c0d Merge branch 'danielmiessler-main' 2024-12-07 18:30:11 -05:00
John
aef4a1a5d4 merging 2024-12-07 18:24:17 -05:00
John
2579d4e87d Add cards component 2024-12-05 05:25:28 -05:00
John
f4885c5cdd Update: packages, main page, styles 2024-12-05 05:24:07 -05:00
dependabot[bot]
79b27253cd build(deps-dev): bump @sveltejs/kit
Bumps the npm_and_yarn group with 1 update in the /web directory: [@sveltejs/kit](https://github.com/sveltejs/kit/tree/HEAD/packages/kit).


Updates `@sveltejs/kit` from 2.8.4 to 2.9.0
- [Release notes](https://github.com/sveltejs/kit/releases)
- [Changelog](https://github.com/sveltejs/kit/blob/main/packages/kit/CHANGELOG.md)
- [Commits](https://github.com/sveltejs/kit/commits/@sveltejs/kit@2.9.0/packages/kit)

---
updated-dependencies:
- dependency-name: "@sveltejs/kit"
  dependency-type: direct:development
  dependency-group: npm_and_yarn
...

Signed-off-by: dependabot[bot] <support@github.com>
2024-12-04 11:26:01 +00:00
github-actions[bot]
6deb4d69c0 Update version to v..1 and commit 2024-12-04 11:25:19 +00:00
John Connor
1b97a57cba Merge pull request #3 from danielmiessler/main
Update
2024-12-04 06:24:23 -05:00
John
0302e49ebd Merge remote-tracking branch 'refs/remotes/origin/main' 2024-12-04 06:19:02 -05:00
John
b9a5501f9d Style: Reordered columns. Improved responsive layout 2024-12-04 05:51:15 -05:00
John
faa83f9a49 Style: modified chat/+layout display. Update Header buttons 2024-12-04 05:41:53 -05:00
John
4888f8cb78 style: updates to ui components (components/ui) 2024-12-04 01:39:13 -05:00
John
f33ebb7e25 Moved pattern loader to ModelConfig. Editing styles in chat/. Added page fly transitions. Tidying. Removed - ChatHeader, unused modal from Transcripts, FlyandScaleParams from lib/types/utils. 2024-12-03 02:04:39 -05:00
255 changed files with 17009 additions and 10865 deletions


@@ -81,14 +81,14 @@ jobs:
       - name: Upload build artifact
         if: matrix.os != 'windows-latest'
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: fabric-${OS}-${{ matrix.arch }}
           path: fabric-${OS}-${{ matrix.arch }}
       - name: Upload build artifact
         if: matrix.os == 'windows-latest'
-        uses: actions/upload-artifact@v3
+        uses: actions/upload-artifact@v4
         with:
           name: fabric-windows-${{ matrix.arch }}.exe
           path: fabric-windows-${{ matrix.arch }}.exe

.gitignore

@@ -342,3 +342,8 @@ web/.svelte-kit
 # End of https://www.toptal.com/developers/gitignore/api/node
+web/myfiles/Obsidian_perso_not_share/
+ENV
+web/package-lock.json
+.gitignore_backup
+web/static/*.png

Alma.md

@@ -14,7 +14,7 @@ Those will be changes, updates, or modifications to the direction of the company
 Alma Security was started by Chris Meyers, who was previously at Sigma Systems as CTO and HPE as a senior security engineer.
-He started the company becuase, "I saw a gap in the authentication market, where companies were only looking at one or two aspects of one's identity to do authentication. They we're looking at the whole picture and turning that into a continuous authentication story."
+He started the company because, "I saw a gap in the authentication market, where companies were only looking at one or two aspects of one's identity to do authentication. They we're looking at the whole picture and turning that into a continuous authentication story."
 ## Company Mission
@@ -52,11 +52,13 @@ NOTE: Some goals are things like project rollouts which serve the higher goals.
 ## Security Team Goals
 - SG1: Secure all customer data -- especially biometric -- from security and privacy incidents.
-- SG2: Protect Alma Security's intellectual property from being captured by unathorized parties.
+- SG2: Protect Alma Security's intellectual property from being captured by unauthorized parties.
 - SG3: Reach a time to detect malicious behavior of less than 4 minutes by January 2025
 - SG4: Ensure the public trusts our product, because it's an authentication product we can't survive if people don't trust us.
-- SG5: Reach a time to remediate critical vulnerabilties on crown jewel systems of less than 16 hours by August 2025
-- SG6: Reach a time to remediate critical vulnerabilties on all systems of less than 3 days by August 2025
+- SG5: Reach a time to remediate critical vulnerabilities on crown jewel systems of less than 16 hours by August 2025
+- SG6: Reach a time to remediate critical vulnerabilities on all systems of less than 3 days by August 2025
 - SG7: Complete audit of Apple Passkey integration by February 2025
 - SG8: Complete remediation of Apple Passkey vulns by February 2025
@@ -129,7 +131,7 @@ $INSERT GRAPHS OF KPI PROGRESS OVER TIME HERE$
 ## Infrastructure Notes (a basic description of our tech stack and various context around it)
-- We currenty have no WAF protecting our main web app but we're considering adding one
+- We currently have no WAF protecting our main web app but we're considering adding one
 - We have had some issues with S3 buckets becoming public, or being set up as public, which has lead to some close calls with customer data almost being exposed.
 - alma.amazon-domain.com is our primary S3 bucket that contains everything, but it's not public readable or listable
 - We have a root account for our AWS account that doesn't yet have 2FA on it, but we're working on fixing that within a few weeks (but it's been open for a few months)
@@ -137,7 +139,7 @@ $INSERT GRAPHS OF KPI PROGRESS OVER TIME HERE$
 - Developers have root access to the all kubernetes nodes via SSH on port 45,001 using a shared developer key issued during laptop provisioning.
 - We're a kubernetes shop and do everything through AWS
 - We're logging most stuff to Cloudtrail and we kind of use guarduty, but we don't have a 24/7 team to monitor alerts and logs. We should add that to our list of challenges next time we update our overarll policy
-- We also have a Windows infrastructure because some key personell came from Microsoft. The DC is hosted in our head office which is in Redwood City, and anyone who works in that office (most of the 300 employees) uses that to log in when tehy start work. The domain is ALMA.
+- We also have a Windows infrastructure because some key personnel came from Microsoft. The DC is hosted in our head office which is in Redwood City, and anyone who works in that office (most of the 300 employees) uses that to log in when they start work. The domain is ALMA.
 - There's a domain-joined fileserver running Windows 2012 that most people use to upload new ideas and plans for new products. It uses Windows authentication from the domain.
 - We use a palo alto firewall with 2fa using windows authenticator tied to SSO.
 - The name of the AI system doing all this context creation using SPQA is Alma, which is also the name of the company.

ENV

@@ -1,9 +0,0 @@
-DEFAULT_VENDOR=OpenRouter
-DEFAULT_MODEL=openai/gpt-3.5-turbo-0125
-DEFAULT_MODEL_CONTEXT_LENGTH=128K
-PATTERNS_LOADER_GIT_REPO_URL=https://github.com/danielmiessler/fabric.git
-PATTERNS_LOADER_GIT_REPO_PATTERNS_FOLDER=patterns
-OPENROUTER_API_KEY=sk-or-v1-
-OPENROUTER_API_BASE_URL=https://openrouter.ai/api/v1
-YOUTUBE_API_KEY=AIzaS
-JINA_AI_API_KEY=jina_57


@@ -10,7 +10,7 @@
 - The actions performed with a given model
 - The configuration flow works like this for an **initial** call:
-  - The available vendors are called one by one, each of them being responsible for the data they collect. They return a set of environment variables under the form of a list of strings, or an empty list if the user does not want to setup this vendor. As we do not want each vendor to know which way the data they need will be collected (e.g., read from the command line, or a GUI), they will be asked for a list of questions, the configuration will inquire the user, and send back the questions with tthe collected answers to the Vendor. The Vendor is then either instantiating an instance (Vendor configured) and returning it, or returning `nil` if the Vendor should not be set up.
+  - The available vendors are called one by one, each of them being responsible for the data they collect. They return a set of environment variables under the form of a list of strings, or an empty list if the user does not want to setup this vendor. As we do not want each vendor to know which way the data they need will be collected (e.g., read from the command line, or a GUI), they will be asked for a list of questions, the configuration will inquire the user, and send back the questions with the collected answers to the Vendor. The Vendor is then either instantiating an instance (Vendor configured) and returning it, or returning `nil` if the Vendor should not be set up.
   - the `.env` file is created, using the information returned by the vendors
 - A list of patterns is downloaded from the main site
@@ -25,7 +25,7 @@
 ## TODO:
-- Check if we need to read the system.md for every patterns when runnign the ListAllPatterns
+- Check if we need to read the system.md for every patterns when running the ListAllPatterns
 - Context management seems more complex than the one in the original fabric. Probably needs some work (at least to make it clear how it works)
 - models on command line: give as well vendor (like `--model openai/gpt-4o`). If the vendor is not given, get it by retrieving all possible models and searching from that.
 - if user gives the ollama url on command line, we need to update/init an ollama vendor.


@@ -0,0 +1,124 @@
# Pattern Descriptions and Tags Management
This document explains the complete workflow for managing pattern descriptions and tags, including how to process new patterns and maintain metadata.
## System Overview
The pattern system follows this hierarchy:
1. `~/.config/fabric/patterns/` directory: The source of truth for available patterns
2. `pattern_extracts.json`: Contains first 500 words of each pattern for reference
3. `pattern_descriptions.json`: Stores pattern metadata (descriptions and tags)
4. `web/static/data/pattern_descriptions.json`: Web-accessible copy for the interface
## Pattern Processing Workflow
### 1. Adding New Patterns
- Add patterns to `~/.config/fabric/patterns/`
- Run extract_patterns.py to process new additions:
```bash
python extract_patterns.py
```
The Python script automatically:
- Creates pattern extracts for reference
- Adds placeholder entries in descriptions file
- Syncs to web interface
### 2. Pattern Extract Creation
The script extracts the first 500 words from each pattern's system.md file to:
- Provide context for writing descriptions
- Maintain reference material
- Aid in pattern categorization
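For a quick spot check outside the script, the same 500-word extract can be approximated in the shell. A minimal sketch, where the pattern name `extract_wisdom` is only an example:

```bash
# Approximate the script's 500-word extract for one pattern (pattern name is an example)
tr -s '[:space:]' ' ' < ~/.config/fabric/patterns/extract_wisdom/system.md | cut -d' ' -f1-500
```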
### 3. Description and Tag Management
Pattern descriptions and tags are managed in pattern_descriptions.json:
```json
{
  "patterns": [
    {
      "patternName": "pattern_name",
      "description": "[Description pending]",
      "tags": []
    }
  ]
}
```
## Completing Pattern Metadata
### Writing Descriptions
1. Check pattern_descriptions.json for "[Description pending]" entries
2. Reference pattern_extracts.json for context
3. Write the short description (one sentence).
You can update your descriptions in pattern_descriptions.json manually or using LLM assistance (preferred approach).
Tell the AI to look for "[Description pending]" entries in this file and write a short description based on the extract info in the pattern_extracts.json file. You can also ask your LLM to add tags for those newly added patterns, using other patterns' tag assignments as examples.
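To see what still needs writing, a one-liner like the following can list the placeholder entries. This is a convenience sketch, not part of the repository tooling, and assumes `jq` is installed:

```bash
# List patterns whose description is still the placeholder
jq -r '.patterns[] | select(.description == "[Description pending]") | .patternName' pattern_descriptions.json
```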
### Managing Tags
1. Add appropriate tags to new patterns
2. Update existing tags as needed
3. Tags are stored as arrays: ["TAG1", "TAG2"]
4. Edit pattern_descriptions.json directly to modify tags
5. Make tags your own. You can delete, replace, or amend existing tags.
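Tags can also be edited from the shell rather than by hand. A sketch under the same `jq` assumption, where the pattern name and tag values are illustrative only:

```bash
# Replace the tags array for one pattern (name and tags are example values)
jq '(.patterns[] | select(.patternName == "create_loe_document") | .tags) = ["WRITING", "PLANNING"]' \
  pattern_descriptions.json > tmp.json && mv tmp.json pattern_descriptions.json
```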
## File Synchronization
The script maintains synchronization between:
- Local pattern_descriptions.json
- Web interface copy in static/data/
- No manual file copying needed
## Best Practices
1. Run extract_patterns.py when:
- Adding new patterns
- Updating existing patterns
- Modifying pattern structure
2. Description Writing:
- Use pattern extracts for context
- Keep descriptions clear and concise
- Focus on pattern purpose and usage
3. Tag Management:
- Use consistent tag categories
- Apply multiple tags when relevant
- Update tags to reflect pattern evolution
## Troubleshooting
If patterns are not showing in the web interface:
1. Verify pattern_descriptions.json format
2. Check web static copy exists
3. Ensure proper file permissions
4. Run extract_patterns.py to resync
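Steps 1 and 2 can be checked from the shell. A minimal sketch, assuming it is run from the PATTERN_DESCRIPTIONS directory shown in the file structure below:

```bash
# Verify the JSON parses, then confirm the web static copy exists and matches
python -m json.tool pattern_descriptions.json > /dev/null && echo "JSON OK"
diff pattern_descriptions.json ../web/static/data/pattern_descriptions.json && echo "web copy in sync"
```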
## File Structure

```
fabric/
├── patterns/                         # Pattern source files
├── PATTERN_DESCRIPTIONS/
│   ├── extract_patterns.py           # Pattern processing script
│   ├── pattern_extracts.json         # Pattern content references
│   └── pattern_descriptions.json     # Pattern metadata
└── web/
    └── static/
        └── data/
            └── pattern_descriptions.json  # Web interface copy
```


@@ -0,0 +1,114 @@
import os
import json
import shutil


def load_existing_file(filepath):
    """Load existing JSON file or return default structure"""
    if os.path.exists(filepath):
        with open(filepath, 'r', encoding='utf-8') as f:
            return json.load(f)
    return {"patterns": []}


def get_pattern_extract(pattern_path):
    """Extract first 500 words from pattern's system.md file"""
    system_md_path = os.path.join(pattern_path, "system.md")
    with open(system_md_path, 'r', encoding='utf-8') as f:
        content = ' '.join(f.read().split()[:500])
    return content


def extract_pattern_info():
    script_dir = os.path.dirname(os.path.abspath(__file__))
    patterns_dir = os.path.expanduser("~/.config/fabric/patterns")
    print(f"\nScanning patterns directory: {patterns_dir}")

    extracts_path = os.path.join(script_dir, "pattern_extracts.json")
    descriptions_path = os.path.join(script_dir, "pattern_descriptions.json")

    existing_extracts = load_existing_file(extracts_path)
    existing_descriptions = load_existing_file(descriptions_path)

    existing_extract_names = {p["patternName"] for p in existing_extracts["patterns"]}
    existing_description_names = {p["patternName"] for p in existing_descriptions["patterns"]}
    print(f"Found existing patterns: {len(existing_extract_names)}")

    new_extracts = []
    new_descriptions = []

    for dirname in sorted(os.listdir(patterns_dir)):
        # Only log new pattern processing
        if dirname not in existing_extract_names:
            print(f"Processing new pattern: {dirname}")

        pattern_path = os.path.join(patterns_dir, dirname)
        system_md_path = os.path.join(pattern_path, "system.md")
        print(f"Checking system.md at: {system_md_path}")

        if os.path.isdir(pattern_path) and os.path.exists(system_md_path):
            print(f"Valid pattern directory found: {dirname}")
            try:
                if dirname not in existing_extract_names:
                    print(f"Creating new extract for: {dirname}")
                    pattern_extract = get_pattern_extract(pattern_path)  # Pass directory path
                    new_extracts.append({
                        "patternName": dirname,
                        "pattern_extract": pattern_extract
                    })

                if dirname not in existing_description_names:
                    print(f"Creating new description for: {dirname}")
                    new_descriptions.append({
                        "patternName": dirname,
                        "description": "[Description pending]",
                        "tags": []
                    })
            except Exception as e:
                print(f"Error processing {dirname}: {str(e)}")
        else:
            print(f"Invalid pattern directory or missing system.md: {dirname}")

    print(f"\nProcessing summary:")
    print(f"New extracts created: {len(new_extracts)}")
    print(f"New descriptions added: {len(new_descriptions)}")

    existing_extracts["patterns"].extend(new_extracts)
    existing_descriptions["patterns"].extend(new_descriptions)

    return existing_extracts, existing_descriptions, len(new_descriptions)


def update_web_static(descriptions_path):
    """Copy pattern descriptions to web static directory"""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    static_dir = os.path.join(script_dir, "..", "web", "static", "data")
    os.makedirs(static_dir, exist_ok=True)
    static_path = os.path.join(static_dir, "pattern_descriptions.json")
    shutil.copy2(descriptions_path, static_path)


def save_pattern_files():
    """Save both pattern files and sync to web"""
    script_dir = os.path.dirname(os.path.abspath(__file__))
    extracts_path = os.path.join(script_dir, "pattern_extracts.json")
    descriptions_path = os.path.join(script_dir, "pattern_descriptions.json")

    pattern_extracts, pattern_descriptions, new_count = extract_pattern_info()

    # Save files
    with open(extracts_path, 'w', encoding='utf-8') as f:
        json.dump(pattern_extracts, f, indent=2, ensure_ascii=False)
    with open(descriptions_path, 'w', encoding='utf-8') as f:
        json.dump(pattern_descriptions, f, indent=2, ensure_ascii=False)

    # Update web static
    update_web_static(descriptions_path)

    print(f"\nProcessing complete:")
    print(f"Total patterns: {len(pattern_descriptions['patterns'])}")
    print(f"New patterns added: {new_count}")


if __name__ == "__main__":
    save_pattern_files()

File diff suppressed because it is too large

File diff suppressed because one or more lines are too long


@@ -66,9 +66,9 @@
 ## Updates
 > [!NOTE]
-> November 8, 2024
+> February 5, 2025
 >
 > - **Multimodal Support**: You can now use `-a` (attachment) for Multimodal submissions to OpenAI models that support it. Example: `fabric -a https://path/to/image "Give me a description of this image."`
 > - Remember that `fabric` supports `o1` and `o3` models, but you need to 1) not use `-s`, and 2) use the `--raw` flag because the o1 and o3 models don't support the `--stream` option or temperature settings.
## What and why
@@ -126,22 +126,20 @@ To install Fabric, you can use the latest release binaries or install it from th
 ### Get Latest Release Binaries
-```bash
-# Windows:
-curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe > fabric.exe && fabric.exe --version
+#### Windows:
+`https://github.com/danielmiessler/fabric/releases/latest/download/fabric-windows-amd64.exe`
-# MacOS (arm64):
-curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-arm64 > fabric && chmod +x fabric && ./fabric --version
+#### MacOS (arm64):
+`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-arm64 > fabric && chmod +x fabric && ./fabric --version`
-# MacOS (amd64):
-curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-amd64 > fabric && chmod +x fabric && ./fabric --version
+#### MacOS (amd64):
+`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-darwin-amd64 > fabric && chmod +x fabric && ./fabric --version`
-# Linux (amd64):
-curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-amd64 > fabric && chmod +x fabric && ./fabric --version
+#### Linux (amd64):
+`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-amd64 > fabric && chmod +x fabric && ./fabric --version`
-# Linux (arm64):
-curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-arm64 > fabric && chmod +x fabric && ./fabric --version
-```
+#### Linux (arm64):
+`curl -L https://github.com/danielmiessler/fabric/releases/latest/download/fabric-linux-arm64 > fabric && chmod +x fabric && ./fabric --version`
### From Source
@@ -211,6 +209,67 @@ yt() {
}
```
You can add the below code for the equivalent aliases inside PowerShell by running `notepad $PROFILE` inside a PowerShell window:
```powershell
# Path to the patterns directory
$patternsPath = Join-Path $HOME ".config/fabric/patterns"
foreach ($patternDir in Get-ChildItem -Path $patternsPath -Directory) {
$patternName = $patternDir.Name
# Dynamically define a function for each pattern
$functionDefinition = @"
function $patternName {
[CmdletBinding()]
param(
[Parameter(ValueFromPipeline = `$true)]
[string] `$InputObject,
[Parameter(ValueFromRemainingArguments = `$true)]
[String[]] `$patternArgs
)
begin {
# Initialize an array to collect pipeline input
`$collector = @()
}
process {
# Collect pipeline input objects
if (`$InputObject) {
`$collector += `$InputObject
}
}
end {
# Join all pipeline input into a single string, separated by newlines
`$pipelineContent = `$collector -join "`n"
# If there's pipeline input, include it in the call to fabric
if (`$pipelineContent) {
`$pipelineContent | fabric --pattern $patternName `$patternArgs
} else {
# No pipeline input; just call fabric with the additional args
fabric --pattern $patternName `$patternArgs
}
}
}
"@
# Add the function to the current session
Invoke-Expression $functionDefinition
}
# Define the 'yt' function as well
function yt {
[CmdletBinding()]
param(
[Parameter(Mandatory = $true)]
[string]$videoLink
)
fabric -y $videoLink --transcript
}
```
This also creates a `yt` alias that allows you to use `yt https://www.youtube.com/watch?v=4b0iet22VIk` to get transcripts, comments, and metadata.
#### Save your files in markdown using aliases
@@ -384,7 +443,15 @@ pbpaste | fabric --stream --pattern analyze_claims
 fabric -y "https://youtube.com/watch?v=uXs-zPc63kM" --stream --pattern extract_wisdom
 ```
-4. Create patterns- you must create a .md file with the pattern and save it to ~/.config/fabric/patterns/[yourpatternname].
+4. Create patterns- you must create a .md file with the pattern and save it to `~/.config/fabric/patterns/[yourpatternname]`.
+5. Run a `analyze_claims` pattern on a website. Fabric uses Jina AI to scrape the URL into markdown format before sending it to the model.
+```bash
+fabric -u https://github.com/danielmiessler/fabric/ -p analyze_claims
+```
## Just use the Patterns
@@ -415,7 +482,6 @@ When you're ready to use them, copy them into:
 You can then use them like any other Patterns, but they won't be public unless you explicitly submit them as Pull Requests to the Fabric project. So don't worry—they're private to you.
-This feature works with all openai and ollama models but does NOT work with claude. You can specify your model with the -m flag
 ## Helper Apps


@@ -0,0 +1,64 @@
# Enhanced Pattern Selection and WEB UI Improvements
This PR adds several Web UI and functionality improvements to make pattern selection more intuitive and provide better context for each pattern's purpose.
## Demo
Watch the demo video showcasing the new features: https://youtu.be/qVuKhCw_edk
## Major Improvements
### Pattern Selection and Description
- Added modal interface for pattern selection
- Added short pattern descriptions for each pattern
- Added Select Pattern to execute from Modal
- Added scroll functionality to System Instructions frame
- **Added search functionality in pattern selection modal**
- Real-time pattern filtering as you type
- Case-insensitive partial name matching
- Maintains favorites sorting while filtering
### User Experience
- Implemented favorites functionality for quick access to frequently used patterns
- Improved YouTube transcript handling
- Enhanced UI components for better user experience
- **Added Obsidian integration for pattern execution output**
- Save pattern results directly to Obsidian from web interface
- Configurable note naming
- Seamless integration with existing Obsidian workflow
## Technical Improvements
- Added backend support for new features
- Improved pattern management and selection
- Enhanced state management for patterns and favorites
## Key Files Modified
### Backend Changes
- `fabric/restapi/`: Added new endpoints and functionality for pattern management
- `chat.go`, `patterns.go`: Enhanced pattern handling
- `configuration.go`, `models.go`: Added support for new features
- **`obsidian.go`: New Obsidian integration endpoints**
### Frontend Changes
- `fabric/web/src/lib/components/`:
- `chat/`: Enhanced chat interface components
- `patterns/`: New pattern selection components
- **Added pattern search functionality**
- **Enhanced modal UI with search capabilities**
- `ui/modal/`: Modal interface implementation
- `fabric/web/src/lib/store/`:
- `favorites-store.ts`: New favorites functionality
- `pattern-store.ts`: Enhanced pattern management
- **`obsidian-store.ts`: New Obsidian integration state management**
- `fabric/web/src/lib/services/`:
- `transcriptService.ts`: Improved YouTube handling
### Pattern Descriptions
- `fabric/myfiles/`:
- `pattern_descriptions.json`: Added detailed pattern descriptions
- `extract_patterns.py`: Tool for pattern management
These improvements make the pattern selection process more intuitive and provide users with better context about each pattern's purpose and functionality. The addition of pattern search and Obsidian integration further enhances the user experience by providing quick access to patterns and seamless integration with external note-taking workflows.
## Note on Platform Compatibility
This implementation was developed and tested on macOS. Some modifications may be required for Windows users, particularly around system-specific paths and configurations, which may need to be adjusted to match their environment.


@@ -0,0 +1,155 @@
# Language Support Implementation
## Overview
The language support allows switching between languages using qualifiers (--fr, --en) in the chat input. The implementation is simple and effective, working at multiple layers of the application.
## Components
### 1. Language Store (language-store.ts)
```typescript
// Manages language state
export const languageStore = writable<string>('');
```
### 2. Chat Input (ChatInput.svelte)
- Detects language qualifiers in user input
- Updates language store
- Strips qualifier from message
```typescript
// Language qualifier handling
if (qualifier === 'fr') {
languageStore.set('fr');
userInput = userInput.replace(/--fr\s*/, '');
} else if (qualifier === 'en') {
languageStore.set('en');
userInput = userInput.replace(/--en\s*/, '');
}
// After sending message
try {
await sendMessage(userInput);
languageStore.set('en'); // Reset to default after send
} catch (error) {
console.error('Failed to send message:', error);
}
```
### 3. Chat Service (ChatService.ts)
- Adds language instruction to prompts
- Defaults to English if no language specified
```typescript
const language = get(languageStore) || 'en';
const languageInstruction = language !== 'en'
? `. Please use the language '${language}' for the output.`
: '';
const fullInput = userInput + languageInstruction;
```
### 4. Global Settings UI (Chat.svelte)
```typescript
// Language selector in Global Settings
<div class="flex flex-col gap-2">
<Label>Language</Label>
<Select bind:value={selectedLanguage}>
<option value="">Default</option>
<option value="en">English</option>
<option value="fr">French</option>
</Select>
</div>
// Script section
let selectedLanguage = $languageStore;
$: languageStore.set(selectedLanguage);
```
## How It Works
1. User Input:
- User types message with language qualifier (e.g., "--fr Hello")
- ChatInput detects qualifier and updates language store
- Qualifier is stripped from message
- OR user selects language from Global Settings dropdown
2. Request Processing:
- ChatService gets language from store
- Adds language instruction to prompt
- Sends to backend
3. Response:
- AI responds in requested language
- Response is displayed without modification
- Language store is reset to English after message is sent
## Usage Examples
1. English (Default):
```
User: What is the weather?
AI: The weather information...
```
2. French:
```
User: --fr What is the weather?
AI: Voici les informations météo...
```
3. Using Global Settings:
```
1. Select "French" from language dropdown
2. Type: What is the weather?
3. AI responds in French
4. Language resets to English after response
```
## Implementation Notes
1. Simple Design:
- No complex language detection
- No translation layer
- Direct instruction to AI
2. Stateful:
- Language persists until changed
- Resets to English on page refresh
- Resets to English after each message
3. Extensible:
- Easy to add new languages
- Just add new qualifiers and store values
- Update Global Settings dropdown options
4. Error Handling:
- Invalid qualifiers are ignored
- Unknown languages default to English
- Store reset on error to prevent state issues
## Best Practices
1. Always reset language after message:
```typescript
// Reset stores after successful send
languageStore.set('en');
```
2. Default to English:
```typescript
const language = get(languageStore) || 'en';
```
3. Clear language instruction:
```typescript
const languageInstruction = language !== 'en'
? `. Please use the language '${language}' for the output.`
: '';
```
4. Handle UI State:
```typescript
// In Chat.svelte
let selectedLanguage = $languageStore;
$: {
languageStore.set(selectedLanguage);
// Update UI immediately when store changes
selectedLanguage = $languageStore;
}
```


@@ -0,0 +1,79 @@
# Pattern Search Implementation Plan
## Component Changes (PatternList.svelte)
### 1. Add Search Input
```svelte
<div class="px-4 pb-4 flex gap-4 items-center">
<!-- Existing sort options -->
<div class="flex-1"> <!-- Add flex-1 to push search to right -->
<label class="flex items-center gap-2 text-sm text-muted-foreground">
<input type="radio" bind:group={sortBy} value="alphabetical">
Alphabetical
</label>
<label class="flex items-center gap-2 text-sm text-muted-foreground">
<input type="radio" bind:group={sortBy} value="favorites">
Favorites First
</label>
</div>
<!-- New search input -->
<div class="w-48"> <!-- Fixed width for search -->
<Input
type="text"
bind:value={searchText}
placeholder="Search patterns..."
/>
</div>
</div>
```
### 2. Add Search Logic
```typescript
// Add to script section
let searchText = ""; // For pattern filtering
// Modify sortedPatterns to include search
$: filteredPatterns = patterns.filter(p =>
p.patternName.toLowerCase().includes(searchText.toLowerCase())
);
$: sortedPatterns = sortBy === 'alphabetical'
? [...filteredPatterns].sort((a, b) => a.patternName.localeCompare(b.patternName))
: [
...filteredPatterns.filter(p => $favorites.includes(p.patternName)).sort((a, b) => a.patternName.localeCompare(b.patternName)),
...filteredPatterns.filter(p => !$favorites.includes(p.patternName)).sort((a, b) => a.patternName.localeCompare(b.patternName))
];
```
### 3. Reset Search on Selection
```typescript
// In pattern selection click handler
searchText = ""; // Reset search before closing modal
dispatch('select', pattern.patternName);
dispatch('close');
```
## Implementation Steps
1. Import Input component
```typescript
import { Input } from "$lib/components/ui/input";
```
2. Add searchText variable and filtering logic
3. Update template to include search input
4. Add reset logic in pattern selection handler
5. Test search functionality:
- Partial matches work
- Case-insensitive search
- Search resets on selection
- Layout maintains consistency
## Expected Behavior
- Search updates in real-time as user types
- Matches are case-insensitive
- Matches can be anywhere in pattern name
- Search box clears when pattern is selected
- Sort options (alphabetical/favorites) still work with filtered results
- Maintains existing modal layout and styling


@@ -0,0 +1,269 @@
# Enhanced Pattern Selection, Pattern Descriptions, New Pattern TAG System, Language Support and other WEB UI Improvements V3
This cumulative PR adds several Web UI and functionality improvements: more intuitive pattern selection (pattern descriptions), the ability to save favorite patterns, powerful multilingual capabilities, a pattern TAG system, a help reference section, more robust YouTube processing, and a variety of UI improvements.
## 🎥 Demo Video
https://youtu.be/IhE8Iey8hSU
## 🌟 Key Features
### 1. Web UI and Pattern Selection Improvements
- Enhanced pattern selection interface for better user experience
- New pattern descriptions section accessible via modal
- New pattern favorites list and pattern search functionality
- New Tag system for better pattern organization and filtering
- Web UI refinements for clearer interaction
- Help section via modal
### 2. Multilingual Support System
- Seamless language switching via UI dropdown
- Persistent language state management
- Pattern processing now uses the selected language seamlessly
### 3. YouTube Integration Enhancement
- Robust language handling for YouTube transcript processing
- Chunk-based language maintenance for long transcripts
- Consistent language output throughout transcript analysis
### 4. Enhanced Tag Management Integration
The tag filtering system has been deeply integrated into the Pattern Selection interface through several UI enhancements:
1. **Dual-Position Tag Panel**
- Sliding panel positioned to the right of pattern modal
- Dynamic toggle button that adapts position and text based on panel state
- Smooth transitions for opening/closing animations
2. **Tag Selection Visibility**
- New dedicated tag display section in pattern modal
- Visual separation through subtle background styling
- Immediate feedback showing selected tags with comma separation
- Inline reset capability for quick tag clearing
3. **Improved User Experience**
- Clear visual hierarchy between pattern list and tag filtering
- Multiple ways to manage tags (panel or quick reset)
- Consistent styling with existing design language
- Space-efficient tag brick layout in 3-column grid
4. **Technical Implementation**
- Reactive tag state management
- Efficient tag filtering logic
- Proper event dispatching between components
- Maintained accessibility standards
- Responsive design considerations
These enhancements create a more intuitive and efficient pattern discovery experience, allowing users to quickly filter and find relevant patterns while maintaining a clean, modern interface.
## 🛠 Technical Implementation
### Language Support Architecture
```typescript
// Language state management
export const languageStore = writable<string>('');
// Chat input language detection
if (qualifier === 'fr') {
languageStore.set('fr');
userInput = userInput.replace(/--fr\s*/, '');
}
// Service layer integration
const language = get(languageStore) || 'en';
const languageInstruction = language !== 'en'
? `. Please use the language '${language}' for the output.`
: '';
```
### YouTube Processing Enhancement
```typescript
// Process stream with language instruction per chunk
await chatService.processStream(
stream,
(content: string, response?: StreamResponse) => {
if (currentLanguage !== 'en') {
content = `${content}. Please use the language '${currentLanguage}' for the output.`;
}
// Update messages...
}
);
```
# Pattern Descriptions and Tags Management
This document explains the complete workflow for managing pattern descriptions and tags, including how to process new patterns and maintain metadata.
## System Overview
The pattern system follows this hierarchy:
1. `~/.config/fabric/patterns/` directory: The source of truth for available patterns
2. `pattern_extracts.json`: Contains first 500 words of each pattern for reference
3. `pattern_descriptions.json`: Stores pattern metadata (descriptions and tags)
4. `web/static/data/pattern_descriptions.json`: Web-accessible copy for the interface
## Pattern Processing Workflow
### 1. Adding New Patterns
- Add patterns to `~/.config/fabric/patterns/`
- Run extract_patterns.py to process new additions:
```bash
python extract_patterns.py
```
The Python script automatically:
- Creates pattern extracts for reference
- Adds placeholder entries in descriptions file
- Syncs to web interface
### 2. Pattern Extract Creation
The script extracts the first 500 words from each pattern's system.md file to:
- Provide context for writing descriptions
- Maintain reference material
- Aid in pattern categorization
### 3. Description and Tag Management
Pattern descriptions and tags are managed in pattern_descriptions.json:
```json
{
  "patterns": [
    {
      "patternName": "pattern_name",
      "description": "[Description pending]",
      "tags": []
    }
  ]
}
```
## Completing Pattern Metadata
### Writing Descriptions
1. Check pattern_descriptions.json for "[Description pending]" entries
2. Reference pattern_extracts.json for context
3. Write the short description (one sentence).
You can update your descriptions in pattern_descriptions.json manually or using LLM assistance (preferred approach).
Tell the AI to look for "[Description pending]" entries in this file and write a short description based on the extract info in the pattern_extracts.json file. You can also ask your LLM to add tags for those newly added patterns, using other patterns' tag assignments as examples.
### Managing Tags
1. Add appropriate tags to new patterns
2. Update existing tags as needed
3. Tags are stored as arrays: ["TAG1", "TAG2"]
4. Edit pattern_descriptions.json directly to modify tags
5. Make tags your own. You can delete, replace, or amend existing tags.
## File Synchronization
The script maintains synchronization between:
- Local pattern_descriptions.json
- Web interface copy in static/data/
- No manual file copying needed
## Best Practices
1. Run extract_patterns.py when:
- Adding new patterns
- Updating existing patterns
- Modifying pattern structure
2. Description Writing:
- Use pattern extracts for context
- Keep descriptions clear and concise
- Focus on pattern purpose and usage
3. Tag Management:
- Use consistent tag categories
- Apply multiple tags when relevant
- Update tags to reflect pattern evolution
## Troubleshooting
If patterns are not showing in the web interface:
1. Verify pattern_descriptions.json format
2. Check web static copy exists
3. Ensure proper file permissions
4. Run extract_patterns.py to resync
## File Structure

```
fabric/
├── patterns/                         # Pattern source files
├── PATTERN_DESCRIPTIONS/
│   ├── extract_patterns.py           # Pattern processing script
│   ├── pattern_extracts.json         # Pattern content references
│   └── pattern_descriptions.json     # Pattern metadata
└── web/
    └── static/
        └── data/
            └── pattern_descriptions.json  # Web interface copy
```
## 🎯 Usage Examples
### 1. Using Language Qualifiers
```
User: What is the weather?
AI: The weather information...
User: --fr What is the weather?
AI: Voici les informations météo...
```
### 2. Global Settings
1. Select language from dropdown
2. All interactions use selected language
3. Automatic reset to English after each message
### 3. YouTube Analysis
```
User: Analyze this YouTube video --fr
AI: [Provides analysis in French, maintaining language throughout the transcript]
```
## 💡 Key Benefits
1. **Enhanced User Experience**
- Intuitive language switching
- Consistent language handling
- Seamless integration with existing features
2. **Robust Implementation**
- Simple yet powerful design
- No complex language detection needed
- Direct AI instruction approach
3. **Maintainable Architecture**
- Clean separation of concerns
- Stateful language management
- Easy to extend for new languages
4. **YouTube Integration**
- Handles long transcripts effectively
- Maintains language consistency
- Robust chunk processing
## 🔄 Implementation Notes
1. **State Management**
- Language persists until changed
- Resets to English after each message
- Handles UI state updates efficiently
2. **Error Handling**
- Invalid qualifiers are ignored
- Unknown languages default to English
- Proper store reset on errors
3. **Best Practices**
- Clear language instructions
- Consistent state management
- Robust error handling


@@ -287,7 +287,7 @@ func Cli(version string) (err error) {
 func processYoutubeVideo(
 	flags *Flags, registry *core.PluginRegistry, videoId string) (message string, err error) {
-	if (!flags.YouTubeComments && !flags.YouTubeMetadata) || flags.YouTubeTranscript {
+	if (!flags.YouTubeComments && !flags.YouTubeMetadata) || flags.YouTubeTranscript || flags.YouTubeTranscriptWithTimestamps {
 		var transcript string
 		var language = "en"
 		if flags.Language != "" || registry.Language.DefaultLanguage.Value != "" {
@@ -297,8 +297,14 @@ func processYoutubeVideo(
 				language = registry.Language.DefaultLanguage.Value
 			}
 		}
-		if transcript, err = registry.YouTube.GrabTranscript(videoId, language); err != nil {
-			return
+		if flags.YouTubeTranscriptWithTimestamps {
+			if transcript, err = registry.YouTube.GrabTranscriptWithTimestamps(videoId, language); err != nil {
+				return
+			}
+		} else {
+			if transcript, err = registry.YouTube.GrabTranscript(videoId, language); err != nil {
+				return
+			}
+		}
 		message = AppendMessage(message, transcript)
 	}


@@ -20,55 +20,56 @@ import (
// Flags create flags struct. the users flags go into this, this will be passed to the chat struct in cli
type Flags struct {
Pattern string `short:"p" long:"pattern" yaml:"pattern" description:"Choose a pattern from the available patterns" default:""`
PatternVariables map[string]string `short:"v" long:"variable" description:"Values for pattern variables, e.g. -v=#role:expert -v=#points:30"`
Context string `short:"C" long:"context" description:"Choose a context from the available contexts" default:""`
Session string `long:"session" description:"Choose a session from the available sessions"`
Attachments []string `short:"a" long:"attachment" description:"Attachment path or URL (e.g. for OpenAI image recognition messages)"`
Setup bool `short:"S" long:"setup" description:"Run setup for all reconfigurable parts of fabric"`
Temperature float64 `short:"t" long:"temperature" yaml:"temperature" description:"Set temperature" default:"0.7"`
TopP float64 `short:"T" long:"topp" yaml:"topp" description:"Set top P" default:"0.9"`
Stream bool `short:"s" long:"stream" yaml:"stream" description:"Stream"`
PresencePenalty float64 `short:"P" long:"presencepenalty" yaml:"presencepenalty" description:"Set presence penalty" default:"0.0"`
Raw bool `short:"r" long:"raw" yaml:"raw" description:"Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns."`
FrequencyPenalty float64 `short:"F" long:"frequencypenalty" yaml:"frequencypenalty" description:"Set frequency penalty" default:"0.0"`
ListPatterns bool `short:"l" long:"listpatterns" description:"List all patterns"`
ListAllModels bool `short:"L" long:"listmodels" description:"List all available models"`
ListAllContexts bool `short:"x" long:"listcontexts" description:"List all contexts"`
ListAllSessions bool `short:"X" long:"listsessions" description:"List all sessions"`
UpdatePatterns bool `short:"U" long:"updatepatterns" description:"Update patterns"`
Message string `hidden:"true" description:"Messages to send to chat"`
Copy bool `short:"c" long:"copy" description:"Copy to clipboard"`
Model string `short:"m" long:"model" yaml:"model" description:"Choose model"`
ModelContextLength int `long:"modelContextLength" yaml:"modelContextLength" description:"Model context length (only affects ollama)"`
Output string `short:"o" long:"output" description:"Output to file" default:""`
OutputSession bool `long:"output-session" description:"Output the entire session (also a temporary one) to the output file"`
LatestPatterns string `short:"n" long:"latest" description:"Number of latest patterns to list" default:"0"`
ChangeDefaultModel bool `short:"d" long:"changeDefaultModel" description:"Change default model"`
YouTube string `short:"y" long:"youtube" description:"YouTube video or play list \"URL\" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file"`
YouTubePlaylist bool `long:"playlist" description:"Prefer playlist over video if both ids are present in the URL"`
YouTubeTranscript bool `long:"transcript" description:"Grab transcript from YouTube video and send to chat (it is used per default)."`
+ YouTubeTranscriptWithTimestamps bool `long:"transcript-with-timestamps" description:"Grab transcript from YouTube video with timestamps and send to chat"`
YouTubeComments bool `long:"comments" description:"Grab comments from YouTube video and send to chat"`
YouTubeMetadata bool `long:"metadata" description:"Output video metadata"`
Language string `short:"g" long:"language" description:"Specify the Language Code for the chat, e.g. -g=en -g=zh" default:""`
ScrapeURL string `short:"u" long:"scrape_url" description:"Scrape website URL to markdown using Jina AI"`
ScrapeQuestion string `short:"q" long:"scrape_question" description:"Search question using Jina AI"`
Seed int `short:"e" long:"seed" yaml:"seed" description:"Seed to be used for LMM generation"`
WipeContext string `short:"w" long:"wipecontext" description:"Wipe context"`
WipeSession string `short:"W" long:"wipesession" description:"Wipe session"`
PrintContext string `long:"printcontext" description:"Print context"`
PrintSession string `long:"printsession" description:"Print session"`
HtmlReadability bool `long:"readability" description:"Convert HTML input into a clean, readable view"`
InputHasVars bool `long:"input-has-vars" description:"Apply variables to user input"`
DryRun bool `long:"dry-run" description:"Show what would be sent to the model without actually sending it"`
Serve bool `long:"serve" description:"Serve the Fabric Rest API"`
ServeOllama bool `long:"serveOllama" description:"Serve the Fabric Rest API with ollama endpoints"`
ServeAddress string `long:"address" description:"The address to bind the REST API" default:":8080"`
Config string `long:"config" description:"Path to YAML config file"`
Version bool `long:"version" description:"Print current version"`
ListExtensions bool `long:"listextensions" description:"List all registered extensions"`
AddExtension string `long:"addextension" description:"Register a new extension from config file path"`
RemoveExtension string `long:"rmextension" description:"Remove a registered extension by name"`
}
var debug = false
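
The struct above is parsed by `github.com/jessevdk/go-flags` (pinned in go.mod below), which derives option names entirely from the struct tags. A self-contained sketch, trimmed down to the new option:

```go
package main

import (
	"fmt"
	"os"

	flags "github.com/jessevdk/go-flags"
)

// opts is a trimmed-down stand-in for the Flags struct above,
// keeping only the fields relevant to the new option.
type opts struct {
	YouTube                         string `short:"y" long:"youtube" description:"YouTube video URL"`
	YouTubeTranscriptWithTimestamps bool   `long:"transcript-with-timestamps" description:"Grab transcript with timestamps"`
}

func main() {
	var o opts
	if _, err := flags.Parse(&o); err != nil {
		os.Exit(1) // go-flags already printed usage on error
	}
	fmt.Printf("url=%q timestamps=%v\n", o.YouTube, o.YouTubeTranscriptWithTimestamps)
}
```

Running this with `--transcript-with-timestamps` flips the bool; no flag-registration code is needed beyond the tags themselves.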


@@ -131,7 +131,7 @@ func (o *Chatter) BuildSession(request *common.ChatRequest, raw bool) (session *
var patternContent string
if request.PatternName != "" {
pattern, err := o.db.Patterns.GetApplyVariables(request.PatternName, request.PatternVariables, request.Message.Content)
- // pattrn will now contain user input, and all variables will be resolved, or errored
+ // pattern will now contain user input, and all variables will be resolved, or errored
if err != nil {
return nil, fmt.Errorf("could not get pattern %s: %v", request.PatternName, err)


@@ -14,9 +14,11 @@ import (
"github.com/danielmiessler/fabric/plugins/ai"
"github.com/danielmiessler/fabric/plugins/ai/anthropic"
"github.com/danielmiessler/fabric/plugins/ai/azure"
"github.com/danielmiessler/fabric/plugins/ai/deepseek"
"github.com/danielmiessler/fabric/plugins/ai/dryrun"
"github.com/danielmiessler/fabric/plugins/ai/gemini"
"github.com/danielmiessler/fabric/plugins/ai/groq"
"github.com/danielmiessler/fabric/plugins/ai/lmstudio"
"github.com/danielmiessler/fabric/plugins/ai/mistral"
"github.com/danielmiessler/fabric/plugins/ai/ollama"
"github.com/danielmiessler/fabric/plugins/ai/openai"
@@ -53,7 +55,7 @@ func NewPluginRegistry(db *fsdb.Db) (ret *PluginRegistry, err error) {
gemini.NewClient(),
//gemini_openai.NewClient(),
anthropic.NewClient(), siliconcloud.NewClient(),
- openrouter.NewClient(), mistral.NewClient())
+ openrouter.NewClient(), lmstudio.NewClient(), mistral.NewClient(), deepseek.NewClient())
_ = ret.Configure()
return
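
`NewPluginRegistry` simply collects each vendor's `NewClient()` result, so adding LM Studio and DeepSeek is a one-line change per vendor. A minimal sketch of the pattern, with a reduced `Vendor` interface standing in for fabric's richer plugin interface:

```go
package main

import "fmt"

// Vendor is a hypothetical reduction of fabric's vendor interface;
// the real one carries configuration and chat methods as well.
type Vendor interface {
	Name() string
}

type lmStudio struct{}

func (lmStudio) Name() string { return "LM Studio" }

type deepseek struct{}

func (deepseek) Name() string { return "DeepSeek" }

// registry stores vendors by name, mirroring how NewPluginRegistry
// gathers the NewClient() results above.
type registry struct {
	vendors map[string]Vendor
}

func newRegistry(vs ...Vendor) *registry {
	r := &registry{vendors: map[string]Vendor{}}
	for _, v := range vs {
		r.vendors[v.Name()] = v
	}
	return r
}

func main() {
	r := newRegistry(lmStudio{}, deepseek{})
	for name := range r.vendors {
		fmt.Println("registered:", name)
	}
}
```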

coverage.out (new file, 2345 lines; diff suppressed because it is too large)

go.mod (52 lines changed)

@@ -12,7 +12,7 @@ require (
github.com/gin-gonic/gin v1.10.0
github.com/go-git/go-git/v5 v5.12.0
github.com/go-shiori/go-readability v0.0.0-20241012063810-92284fa8a71f
- github.com/google/generative-ai-go v0.18.0
+ github.com/google/generative-ai-go v0.19.0
github.com/jessevdk/go-flags v1.6.1
github.com/joho/godotenv v1.5.1
github.com/ollama/ollama v0.4.1
@@ -20,18 +20,19 @@ require (
github.com/pkg/errors v0.9.1
github.com/samber/lo v1.47.0
github.com/sashabaranov/go-openai v1.35.6
- github.com/stretchr/testify v1.9.0
- golang.org/x/text v0.20.0
- google.golang.org/api v0.205.0
+ github.com/stretchr/testify v1.10.0
+ golang.org/x/text v0.21.0
+ google.golang.org/api v0.220.0
gopkg.in/yaml.v2 v2.4.0
gopkg.in/yaml.v3 v3.0.1
)
require (
cloud.google.com/go v0.116.0 // indirect
cloud.google.com/go/ai v0.8.0 // indirect
- cloud.google.com/go/auth v0.10.1 // indirect
- cloud.google.com/go/auth/oauth2adapt v0.2.5 // indirect
- cloud.google.com/go/compute/metadata v0.5.2 // indirect
+ cloud.google.com/go/auth v0.14.1 // indirect
+ cloud.google.com/go/auth/oauth2adapt v0.2.7 // indirect
+ cloud.google.com/go/compute/metadata v0.6.0 // indirect
cloud.google.com/go/longrunning v0.5.7 // indirect
dario.cat/mergo v1.0.1 // indirect
github.com/Microsoft/go-winio v0.6.2 // indirect
@@ -59,10 +60,10 @@ require (
github.com/goccy/go-json v0.10.3 // indirect
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f // indirect
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
- github.com/google/s2a-go v0.1.8 // indirect
+ github.com/google/s2a-go v0.1.9 // indirect
github.com/google/uuid v1.6.0 // indirect
github.com/googleapis/enterprise-certificate-proxy v0.3.4 // indirect
- github.com/googleapis/gax-go/v2 v2.13.0 // indirect
+ github.com/googleapis/gax-go/v2 v2.14.1 // indirect
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 // indirect
github.com/json-iterator/go v1.1.12 // indirect
github.com/kevinburke/ssh_config v1.2.0 // indirect
@@ -83,23 +84,22 @@ require (
github.com/twitchyliquid64/golang-asm v0.15.1 // indirect
github.com/ugorji/go/codec v1.2.12 // indirect
github.com/xanzy/ssh-agent v0.3.3 // indirect
- go.opencensus.io v0.24.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 // indirect
- go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 // indirect
- go.opentelemetry.io/otel v1.32.0 // indirect
- go.opentelemetry.io/otel/metric v1.32.0 // indirect
- go.opentelemetry.io/otel/trace v1.32.0 // indirect
+ go.opentelemetry.io/auto/sdk v1.1.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 // indirect
+ go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 // indirect
+ go.opentelemetry.io/otel v1.34.0 // indirect
+ go.opentelemetry.io/otel/metric v1.34.0 // indirect
+ go.opentelemetry.io/otel/trace v1.34.0 // indirect
golang.org/x/arch v0.12.0 // indirect
- golang.org/x/crypto v0.29.0 // indirect
- golang.org/x/net v0.31.0 // indirect
- golang.org/x/oauth2 v0.24.0 // indirect
- golang.org/x/sync v0.9.0 // indirect
- golang.org/x/sys v0.27.0 // indirect
- golang.org/x/time v0.7.0 // indirect
- google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 // indirect
- google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 // indirect
- google.golang.org/grpc v1.68.0 // indirect
- google.golang.org/protobuf v1.35.1 // indirect
+ golang.org/x/crypto v0.32.0 // indirect
+ golang.org/x/net v0.34.0 // indirect
+ golang.org/x/oauth2 v0.25.0 // indirect
+ golang.org/x/sync v0.10.0 // indirect
+ golang.org/x/sys v0.29.0 // indirect
+ golang.org/x/time v0.9.0 // indirect
+ google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 // indirect
+ google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 // indirect
+ google.golang.org/grpc v1.70.0 // indirect
+ google.golang.org/protobuf v1.36.4 // indirect
gopkg.in/warnings.v0 v0.1.2 // indirect
gopkg.in/yaml.v3 v3.0.1 // indirect
)

go.sum (248 lines changed)

@@ -1,24 +1,28 @@
cloud.google.com/go v0.26.0/go.mod h1:aQUYkXzVsufM+DwF1aE+0xfcU+56JwCaLick0ClmMTw=
cel.dev/expr v0.19.0/go.mod h1:MrpN08Q+lEBs+bGYdLxxHkZoUSsCp0nSKTs0nTymJgw=
cloud.google.com/go v0.116.0 h1:B3fRrSDkLRt5qSHWe40ERJvhvnQwdZiHu0bJOpldweE=
cloud.google.com/go v0.116.0/go.mod h1:cEPSRWPzZEswwdr9BxE6ChEn01dWlTaF05LiC2Xs70U=
cloud.google.com/go/ai v0.8.0 h1:rXUEz8Wp2OlrM8r1bfmpF2+VKqc1VJpafE3HgzRnD/w=
cloud.google.com/go/ai v0.8.0/go.mod h1:t3Dfk4cM61sytiggo2UyGsDVW3RF1qGZaUKDrZFyqkE=
cloud.google.com/go/auth v0.10.1 h1:TnK46qldSfHWt2a0b/hciaiVJsmDXWy9FqyUan0uYiI=
cloud.google.com/go/auth v0.10.1/go.mod h1:xxA5AqpDrvS+Gkmo9RqrGGRh6WSNKKOXhY3zNOr38tI=
cloud.google.com/go/auth/oauth2adapt v0.2.5 h1:2p29+dePqsCHPP1bqDJcKj4qxRyYCcbzKpFyKGt3MTk=
cloud.google.com/go/auth/oauth2adapt v0.2.5/go.mod h1:AlmsELtlEBnaNTL7jCj8VQFLy6mbZv0s4Q7NGBeQ5E8=
cloud.google.com/go/compute/metadata v0.5.2 h1:UxK4uu/Tn+I3p2dYWTfiX4wva7aYlKixAHn3fyqngqo=
cloud.google.com/go/compute/metadata v0.5.2/go.mod h1:C66sj2AluDcIqakBq/M8lw8/ybHgOZqin2obFxa/E5k=
cloud.google.com/go/auth v0.14.1 h1:AwoJbzUdxA/whv1qj3TLKwh3XX5sikny2fc40wUl+h0=
cloud.google.com/go/auth v0.14.1/go.mod h1:4JHUxlGXisL0AW8kXPtUF6ztuOksyfUQNFjfsOCXkPM=
cloud.google.com/go/auth/oauth2adapt v0.2.7 h1:/Lc7xODdqcEw8IrZ9SvwnlLX6j9FHQM74z6cBk9Rw6M=
cloud.google.com/go/auth/oauth2adapt v0.2.7/go.mod h1:NTbTTzfvPl1Y3V1nPpOgl2w6d/FjO7NNUQaWSox6ZMc=
cloud.google.com/go/compute/metadata v0.6.0 h1:A6hENjEsCDtC1k8byVsgwvVcioamEHvZ4j01OwKxG9I=
cloud.google.com/go/compute/metadata v0.6.0/go.mod h1:FjyFAW1MW0C203CEOMDTu3Dk1FlqW3Rga40jzHL4hfg=
cloud.google.com/go/iam v1.2.0/go.mod h1:zITGuWgsLZxd8OwAlX+eMFgZDXzBm7icj1PVTYG766Q=
cloud.google.com/go/longrunning v0.5.7 h1:WLbHekDbjK1fVFD3ibpFFVoyizlLRl73I7YKuAKilhU=
cloud.google.com/go/longrunning v0.5.7/go.mod h1:8GClkudohy1Fxm3owmBGid8W0pSgodEMwEAztp38Xng=
cloud.google.com/go/storage v1.43.0/go.mod h1:ajvxEa7WmZS1PxvKRq4bq0tFT3vMd502JwstCcYv0Q0=
cloud.google.com/go/translate v1.10.3/go.mod h1:GW0vC1qvPtd3pgtypCv4k4U8B7EdgK9/QEF2aJEUovs=
dario.cat/mergo v1.0.1 h1:Ra4+bf83h2ztPIQYNP99R6m+Y7KfnARDfID+a+vLl4s=
dario.cat/mergo v1.0.1/go.mod h1:uNxQE+84aUszobStD9th8a29P2fMDhsBdgRYvZOxGmk=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/GoogleCloudPlatform/opentelemetry-operations-go/detectors/gcp v1.25.0/go.mod h1:obipzmGjfSjam60XLwGfqUkJsfiheAl+TUjG+4yzyPM=
github.com/Microsoft/go-winio v0.5.2/go.mod h1:WpS1mjBmmwHBEWmogvA2mj8546UReBk4v8QkMxJ6pZY=
github.com/Microsoft/go-winio v0.6.2 h1:F2VQgta7ecxGYO8k3ZZz3RS8fVIXVxONVUPlNERoyfY=
github.com/Microsoft/go-winio v0.6.2/go.mod h1:yd8OoFMLzJbo9gZq8j5qaps8bJ9aShtEA8Ipt1oGCvU=
github.com/ProtonMail/go-crypto v1.1.2 h1:A7JbD57ThNqh7XjmHE+PXpQ3Dqt3BrSAC0AL0Go3KS0=
github.com/ProtonMail/go-crypto v1.1.2/go.mod h1:rA3QumHc/FZ8pAHreoekgiAbzpNsfQAosU5td4SnOrE=
github.com/agnivade/levenshtein v1.1.1/go.mod h1:veldBMzWxcCG2ZvUTKD2kJNRdCk5hVbJomOvKkmgYbo=
github.com/anaskhan96/soup v1.2.5 h1:V/FHiusdTrPrdF4iA1YkVxsOpdNcgvqT1hG+YtcZ5hM=
github.com/anaskhan96/soup v1.2.5/go.mod h1:6YnEp9A2yywlYdM4EgDz9NEHclocMepEtku7wg6Cq3s=
github.com/andybalholm/cascadia v1.3.2 h1:3Xi6Dw5lHF15JtdcmAHD3i1+T8plmv7BQ/nsViSLyss=
@@ -27,28 +31,48 @@ github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be h1:9AeTilPcZAjCFI
github.com/anmitsu/go-shlex v0.0.0-20200514113438-38f4b401e2be/go.mod h1:ySMOLuWl6zY27l47sB3qLNK6tF2fkHG55UZxx8oIVo4=
github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.4 h1:TdGQS+RoR4AUO6gqUL74yK1dz/Arrt/WG+dxOj6Yo6A=
github.com/anthropics/anthropic-sdk-go v0.2.0-alpha.4/go.mod h1:GJxtdOs9K4neo8Gg65CjJ7jNautmldGli5/OFNabOoo=
github.com/apache/arrow/go/arrow v0.0.0-20211112161151-bc219186db40/go.mod h1:Q7yQnSMnLvcXlZ8RV+jwz/6y1rQTqbX6C82SndT52Zs=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de h1:FxWPpzIjnTlhPwqqXc4/vE0f7GvRjuAsbW+HOIe8KnA=
github.com/araddon/dateparse v0.0.0-20210429162001-6b43995a97de/go.mod h1:DCaWoUhZrYW9p1lxo/cm8EmUOOzAPSEZNGF2DK1dJgw=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio=
github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs=
github.com/atotto/clipboard v0.1.4 h1:EH0zSVneZPSuFR11BlR9YppQTVDbh5+16AmcJi4g1z4=
github.com/atotto/clipboard v0.1.4/go.mod h1:ZY9tmq7sm5xIbd9bOK4onWV4S6X0u6GY7Vn0Yu86PYI=
github.com/aws/aws-sdk-go-v2 v1.30.3/go.mod h1:nIQjQVp5sfpQcTc9mPSr1B0PaWK5ByX9MOoDadSN4lc=
github.com/aws/aws-sdk-go-v2/aws/protocol/eventstream v1.6.3/go.mod h1:UbnqO+zjqk3uIt9yCACHJ9IVNhyhOCnYk8yA19SAWrM=
github.com/aws/aws-sdk-go-v2/config v1.27.27/go.mod h1:MVYamCg76dFNINkZFu4n4RjDixhVr51HLj4ErWzrVwg=
github.com/aws/aws-sdk-go-v2/credentials v1.17.27/go.mod h1:gniiwbGahQByxan6YjQUMcW4Aov6bLC3m+evgcoN4r4=
github.com/aws/aws-sdk-go-v2/feature/ec2/imds v1.16.11/go.mod h1:SeSUYBLsMYFoRvHE0Tjvn7kbxaUhl75CJi1sbfhMxkU=
github.com/aws/aws-sdk-go-v2/internal/configsources v1.3.15/go.mod h1:U9ke74k1n2bf+RIgoX1SXFed1HLs51OgUSs+Ph0KJP8=
github.com/aws/aws-sdk-go-v2/internal/endpoints/v2 v2.6.15/go.mod h1:ZQLZqhcu+JhSrA9/NXRm8SkDvsycE+JkV3WGY41e+IM=
github.com/aws/aws-sdk-go-v2/internal/ini v1.8.0/go.mod h1:8tu/lYfQfFe6IGnaOdrpVgEL2IrrDOf6/m9RQum4NkY=
github.com/aws/aws-sdk-go-v2/service/internal/accept-encoding v1.11.3/go.mod h1:GlAeCkHwugxdHaueRr4nhPuY+WW+gR8UjlcqzPr1SPI=
github.com/aws/aws-sdk-go-v2/service/internal/presigned-url v1.11.17/go.mod h1:RkZEx4l0EHYDJpWppMJ3nD9wZJAa8/0lq9aVC+r2UII=
github.com/aws/aws-sdk-go-v2/service/sso v1.22.4/go.mod h1:ooyCOXjvJEsUw7x+ZDHeISPMhtwI3ZCB7ggFMcFfWLU=
github.com/aws/aws-sdk-go-v2/service/ssooidc v1.26.4/go.mod h1:0oxfLkpz3rQ/CHlx5hB7H69YUpFiI1tql6Q6Ne+1bCw=
github.com/aws/aws-sdk-go-v2/service/sts v1.30.3/go.mod h1:zwySh8fpFyXp9yOr/KVzxOl8SRqgf/IDw5aUt9UKFcQ=
github.com/aws/smithy-go v1.20.3/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
github.com/bwesterb/go-ristretto v1.2.3/go.mod h1:fUIoIZaG73pV5biE2Blr2xEzDoMj7NFEuV9ekS419A0=
github.com/bytedance/sonic v1.12.4 h1:9Csb3c9ZJhfUWeMtpCDCq6BUoH5ogfDFLUgQ/jG+R0k=
github.com/bytedance/sonic v1.12.4/go.mod h1:B8Gt/XvtZ3Fqj+iSKMypzymZxw/FVwgIGKzMzT9r/rk=
github.com/bytedance/sonic/loader v0.1.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/bytedance/sonic/loader v0.2.1 h1:1GgorWTqf12TA8mma4DDSbaQigE2wOgQo7iCjjJv3+E=
github.com/bytedance/sonic/loader v0.2.1/go.mod h1:ncP89zfokxS5LZrJxl5z0UJcsk4M4yY2JpfqGeCtNLU=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
github.com/census-instrumentation/opencensus-proto v0.4.1/go.mod h1:4T9NM4+4Vw91VeyqjLS6ao50K5bOcLKN6Q42XnYaRYw=
github.com/cespare/xxhash/v2 v2.3.0/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chewxy/hm v1.0.0/go.mod h1:qg9YI4q6Fkj/whwHR1D+bOGeF7SniIP40VweVepLjg0=
github.com/chewxy/math32 v1.10.1/go.mod h1:dOB2rcuFrCn6UHrze36WSLVPKtzPMRAQvBvUwkSsLqs=
github.com/cloudflare/circl v1.5.0 h1:hxIWksrX6XN5a1L2TI/h53AGPhNHoUBo+TD1ms9+pys=
github.com/cloudflare/circl v1.5.0/go.mod h1:uddAzsPgqdMAYatqJ0lsjX1oECcQLIlRpzZh3pJrofs=
github.com/cloudwego/base64x v0.1.4 h1:jwCgWpFanWmN8xoIUHa2rtzmkd5J2plF/dnLS6Xd/0Y=
github.com/cloudwego/base64x v0.1.4/go.mod h1:0zlkT4Wn5C6NdauXdJRhSKRlJvmclQ1hhJgA0rcu/8w=
github.com/cloudwego/iasm v0.2.0 h1:1KNIy1I1H9hNNFEEH3DVnI4UujN+1zjpuk6gwHLTssg=
github.com/cloudwego/iasm v0.2.0/go.mod h1:8rXZaNYT2n95jn+zTI1sDr+IgcD2GVs0nlbbQPiEFhY=
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/xds/go v0.0.0-20240905190251-b4127c9b8d78/go.mod h1:W+zGtBO5Y1IgJhy4+A9GOqVhqLpfZi+vwmdNXUehLA8=
github.com/containerd/console v1.0.3/go.mod h1:7LqA/THxQ86k76b8c/EMSiaJ3h1eZkMkXar0TQ1gf3U=
github.com/cyphar/filepath-securejoin v0.3.4 h1:VBWugsJh2ZxJmLFSM06/0qzQyiQX2Qs0ViKrUAcqdZ8=
github.com/cyphar/filepath-securejoin v0.3.4/go.mod h1:8s/MCNJREmFK0H02MF6Ihv1nakJe4L/w3WZLHNkvlYM=
github.com/d4l3k/go-bfloat16 v0.0.0-20211005043715-690c3bdd05f1/go.mod h1:uw2gLcxEuYUlAd/EXyjc/v55nd3+47YAgWbSXVxPrNI=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
@@ -56,14 +80,13 @@ github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a h1:mATvB/9r/3gvcej
github.com/elazarl/goproxy v0.0.0-20230808193330-2592e75ae04a/go.mod h1:Ro8st/ElPeALwNFlcTpWmkr6IoMFfkjXAvTHpevnDsM=
github.com/emirpasic/gods v1.18.1 h1:FXtiHYKDGKCW2KzwZKx0iC0PQmdlorYgdFG9jPXJ1Bc=
github.com/emirpasic/gods v1.18.1/go.mod h1:8tpGGwCnJ5H4r6BWwaV6OrWmMoPhUl5jm/FMNAnJvWQ=
github.com/envoyproxy/go-control-plane v0.9.0/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.1-0.20191026205805-5f8ba28d4473/go.mod h1:YTl/9mNaCwkRvm6d1a2C3ymFceY/DCBVvsKhRF0iEA4=
github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1mIlRU8Am5FuJP05cCM98=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/envoyproxy/go-control-plane v0.13.1/go.mod h1:X45hY0mufo6Fd0KW3rqsGvQMw58jvjymeCzBU3mWyHw=
github.com/envoyproxy/protoc-gen-validate v1.1.0/go.mod h1:sXRDRVmzEbkM7CVcM06s9shE/m23dg3wzjl0UWqJ2q4=
github.com/felixge/httpsnoop v1.0.4 h1:NFTV2Zj1bL4mc9sqWACXbQFVBBg2W3GPvqp8/ESS2Wg=
github.com/felixge/httpsnoop v1.0.4/go.mod h1:m8KPJKqk1gH5J9DgRY2ASl2lWCfGKXixSwevea8zH2U=
github.com/gabriel-vasile/mimetype v1.4.6 h1:3+PzJTKLkvgjeTbts6msPJt4DixhT4YtFNf1gtGe3zc=
github.com/gabriel-vasile/mimetype v1.4.6/go.mod h1:JX1qVKqZd40hUPpAfiNTe0Sne7hdfKSbOqqmkq8GCXc=
github.com/gin-contrib/cors v1.7.2/go.mod h1:SUJVARKgQ40dmrzgXEVxj2m7Ig1v1qIboQkPDTQ9t2E=
github.com/gin-contrib/sse v0.1.0 h1:Y/yl/+YNO8GZSjAhjMsSuLt29uWRFHdHYUb5lYOV9qE=
github.com/gin-contrib/sse v0.1.0/go.mod h1:RHrZQHXnP2xjPF+u1gW/2HnVO7nvIa9PG3Gm+fLHvGI=
github.com/gin-gonic/gin v1.10.0 h1:nTuyha1TYqgedzytsKYqna+DfLos46nTv2ygFy86HFU=
@@ -97,44 +120,32 @@ github.com/go-shiori/go-readability v0.0.0-20241012063810-92284fa8a71f h1:cypj7S
github.com/go-shiori/go-readability v0.0.0-20241012063810-92284fa8a71f/go.mod h1:YWa00ashoPZMAOElrSn4E1cJErhDVU6PWAll4Hxzn+w=
github.com/goccy/go-json v0.10.3 h1:KZ5WoDbxAIgm2HNbYckL0se1fHD6rz5j4ywS6ebzDqA=
github.com/goccy/go-json v0.10.3/go.mod h1:oq7eo15ShAhp70Anwd5lgX2pLfOS3QCiwU/PULtXL6M=
github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q=
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f h1:3BSP1Tbs2djlpprl7wCLuiqMaUh5SJkkzI2gDs+FgLs=
github.com/gogs/chardet v0.0.0-20211120154057-b7413eaefb8f/go.mod h1:Pcatq5tYkCW2Q6yrR2VRHlbHpZ/R4/7qyL1TCF7vl14=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/glog v1.2.3/go.mod h1:6AhwSGph0fcJtXVM/PEHPqZlFeoLxhs7/t5UDAwmO+w=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.4.0-rc.1/go.mod h1:ceaxUfeHdC40wWswd/P6IGgMaK3YpKi5j83Wpe3EHw8=
github.com/golang/protobuf v1.4.0-rc.1.0.20200221234624-67d41d38c208/go.mod h1:xKAWHe0F5eneWXFV3EuXVDTCmh+JuBKY0li0aMyXATA=
github.com/golang/protobuf v1.4.0-rc.2/go.mod h1:LlEzMj4AhA7rCAGe4KMBDvJI+AwstrUpVNzEA03Pprs=
github.com/golang/protobuf v1.4.0-rc.4.0.20200313231945-b860323f09d0/go.mod h1:WU3c8KckQ9AFe+yFwt9sWVRKCVIyN9cPHBJSNnbL67w=
github.com/golang/protobuf v1.4.0/go.mod h1:jodUvKwWbYaEsadDk5Fwe5c77LiNKVO9IDvqG2KuDX0=
github.com/golang/protobuf v1.4.1/go.mod h1:U8fpvMrcmy5pZrNK1lt4xCsGvpyWQ/VVv6QDs8UjoX8=
github.com/golang/protobuf v1.4.3/go.mod h1:oDoupMAO8OvCJWAcko0GGGIgR6R6ocIYbsSw735rRwI=
github.com/golang/protobuf v1.5.4 h1:i7eJL8qZTpSEXOPTxNKhASYpMn+8e5Q6AdndVa1dWek=
github.com/golang/protobuf v1.5.4/go.mod h1:lnTiLA8Wa4RWRcIUkrtSVa5nRhsEGBg48fD6rSs7xps=
github.com/google/generative-ai-go v0.18.0 h1:6ybg9vOCLcI/UpBBYXOTVgvKmcUKFRNj+2Cj3GnebSo=
github.com/google/generative-ai-go v0.18.0/go.mod h1:JYolL13VG7j79kM5BtHz4qwONHkeJQzOCkKXnpqtS/E=
github.com/google/go-cmp v0.2.0/go.mod h1:oXzfMopK8JAjlY9xF4vHSVASa0yLyX7SntLO5aqRK0M=
github.com/google/go-cmp v0.3.0/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.3.1/go.mod h1:8QqcDgzrUqlUb/G2PQTWiueGozuR1884gddMywk6iLU=
github.com/google/go-cmp v0.4.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.0/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/golang/snappy v0.0.4/go.mod h1:/XxbfmMg8lxefKM7IXC3fBNl/7bRcc72aCRzEWrmP2Q=
github.com/google/flatbuffers v24.3.25+incompatible/go.mod h1:1AeVuKshWv4vARoZatz6mlQ0JxURH0Kv5+zNeJKJCa8=
github.com/google/generative-ai-go v0.19.0 h1:R71szggh8wHMCUlEMsW2A/3T+5LdEIkiaHSYgSpUgdg=
github.com/google/generative-ai-go v0.19.0/go.mod h1:JYolL13VG7j79kM5BtHz4qwONHkeJQzOCkKXnpqtS/E=
github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI=
github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/go-pkcs11 v0.3.0/go.mod h1:6eQoGcuNJpa7jnd5pMGdkSaQpNDYvPlXWMcjXXThLlY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/s2a-go v0.1.8 h1:zZDs9gcbt9ZPLV0ndSyQk6Kacx2g/X+SKYovpnz3SMM=
github.com/google/s2a-go v0.1.8/go.mod h1:6iNWHTpQ+nfNRN5E00MSdfDwVesa8hhS32PhPO8deJA=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/google/martian/v3 v3.3.3/go.mod h1:iEPrYcgCF7jA9OtScMFQyAlZZ4YXTKEtJ1E6RWzmBA0=
github.com/google/s2a-go v0.1.9 h1:LGD7gtMgezd8a/Xak7mEWL0PjoTQFvpRudN895yqKW0=
github.com/google/s2a-go v0.1.9/go.mod h1:YA0Ei2ZQL3acow2O62kdp9UlnvMmU7kA6Eutn0dXayM=
github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0=
github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/enterprise-certificate-proxy v0.3.4 h1:XYIDZApgAnrN1c855gTgghdIA6Stxb52D5RnLI1SLyw=
github.com/googleapis/enterprise-certificate-proxy v0.3.4/go.mod h1:YKe7cfqYXjKGpGvmSg28/fFvhNzinZQm8DGnaburhGA=
github.com/googleapis/gax-go/v2 v2.13.0 h1:yitjD5f7jQHhyDsnhKEBU52NdvvdSeGzlAnDPT0hH1s=
github.com/googleapis/gax-go/v2 v2.13.0/go.mod h1:Z/fvTZXF8/uw7Xu5GuslPw+bplx6SS338j1Is2S+B7A=
github.com/googleapis/gax-go/v2 v2.14.1 h1:hb0FFeiPaQskmvakKu5EbCbpntQn48jyHuvrkurSS/Q=
github.com/googleapis/gax-go/v2 v2.14.1/go.mod h1:Hb/NubMaVM88SrNkvl8X/o8XWwDJEPqouaLeN2IUxoA=
github.com/inconshreveable/mousetrap v1.1.0/go.mod h1:vpF70FUmC8bwa3OWnCshd2FqLfsEA9PFc4w1p2J65bw=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99 h1:BQSFePA1RWJOlocH6Fxy8MmwDt+yVQYULKfN0RoTN8A=
github.com/jbenet/go-context v0.0.0-20150711004518-d14ea06fba99/go.mod h1:1lJo3i6rXxKeerYnT8Nvf0QmHCRC1n8sfWVwXF2Frvo=
github.com/jessevdk/go-flags v1.6.1 h1:Cvu5U8UGrLay1rZfv/zP7iLpSHGUZ/Ou68T0iX1bBK4=
@@ -161,11 +172,15 @@ github.com/leodido/go-urn v1.4.0/go.mod h1:bvxc+MVxLKB4z00jd1z+Dvzr47oO32F/QSNjS
github.com/mattn/go-isatty v0.0.20 h1:xfD0iDuEKnDkl03q4limB+vH+GxLEtL/jb4xVJSWWEY=
github.com/mattn/go-isatty v0.0.20/go.mod h1:W+V8PltTTMOvKvAeJH7IuucS94S2C6jfK/D7dTCTo3Y=
github.com/mattn/go-runewidth v0.0.10/go.mod h1:RAqKPSqVFrSLVXbA8x7dzmKdmGzieGRCM46jaSJTDAk=
github.com/mattn/go-runewidth v0.0.14/go.mod h1:Jdepj2loyihRzMpdS35Xk/zdY8IAYHsh153qUoGf23w=
github.com/mmcloughlin/avo v0.5.0/go.mod h1:ChHFdoV7ql95Wi7vuq2YT1bwCJqiWdZrQ1im3VujLYM=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9Gz0M=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/nlpodyssey/gopickle v0.3.0/go.mod h1:f070HJ/yR+eLi5WmM1OXJEGaTpuJEUiib19olXgYha0=
github.com/olekukonko/tablewriter v0.0.5/go.mod h1:hPp6KlRPjbx+hW8ykQs1w3UBbZlj6HuIJcUGPhkA7kY=
github.com/ollama/ollama v0.4.1 h1:41x4/L6HrsmQUqG9loN0q2643PHkLpblIlVqXAdByWs=
github.com/ollama/ollama v0.4.1/go.mod h1:QDxM/t2teuubbfN/FT2pBRMPF0K1N3IakgT1OZBD4NY=
github.com/onsi/gomega v1.34.1 h1:EUMJIKUjM8sKjYbtxQI9A4z2o+rruxnzNvpknOXie6k=
@@ -174,18 +189,20 @@ github.com/otiai10/copy v1.14.0 h1:dCI/t1iTdYGtkvCuBG2BgR6KZa83PTclw4U5n2wAllU=
github.com/otiai10/copy v1.14.0/go.mod h1:ECfuL02W+/FkTWZWgQqXPWZgW9oeKCSQ5qVfSc4qc4w=
github.com/otiai10/mint v1.5.1 h1:XaPLeE+9vGbuyEHem1JNk3bYc7KKqyI/na0/mLd/Kks=
github.com/otiai10/mint v1.5.1/go.mod h1:MJm72SBthJjz8qhefc4z1PYEieWmy8Bku7CjcAqyUSM=
github.com/pdevine/tensor v0.0.0-20240510204454-f88f4562727c/go.mod h1:PSojXDXF7TbgQiD6kkd98IHOS0QqTyUEaWRiS8+BLu8=
github.com/pelletier/go-toml/v2 v2.2.3 h1:YmeHyLY8mFWbdkNWwpr+qIL2bEqT0o95WSdkNHvL12M=
github.com/pelletier/go-toml/v2 v2.2.3/go.mod h1:MfCQTFTvCcUyyvvwm1+G6H/jORL20Xlb6rzQu9GuUkc=
github.com/pjbgf/sha1cd v0.3.0 h1:4D5XXmUUBUl/xQ6IjCkEAbqXskkq/4O7LmGn0AqMDs4=
github.com/pjbgf/sha1cd v0.3.0/go.mod h1:nZ1rrWOcGJ5uZgEEVL1VUM9iRQiZvWdbZjkKyFzPPsI=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/planetscale/vtprotobuf v0.6.1-0.20240319094008-0393e58bdf10/go.mod h1:t/avpk3KcrXxUnYOhZhMXJlSEyie6gQbtLq5NM3loB8=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/rivo/uniseg v0.1.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.11.0 h1:cWPaGQEPrBb5/AsnsZesgZZ9yb1OQ+GOISoDNXVBh4M=
github.com/rogpeppe/go-internal v1.11.0/go.mod h1:ddIwULY96R17DhadqLgMfk9H9tvdUzkipdSkR5nkCZA=
github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII=
github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o=
github.com/samber/lo v1.47.0 h1:z7RynLwP5nbyRscyvcD043DWYoOcYRv3mV8lBeqOCLc=
github.com/samber/lo v1.47.0/go.mod h1:RmDH9Ct32Qy3gduHQuKJ3gW1fMHAnE/fAzQuf6He5cU=
github.com/sashabaranov/go-openai v1.35.6 h1:oi0rwCvyxMxgFALDGnyqFTyCJm6n72OnEG3sybIFR0g=
@@ -194,11 +211,15 @@ github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3 h1:n661drycOFuPLCN3Uc8sB6B/s6Z4t2xvBgU1htSHuq8=
github.com/sergi/go-diff v1.3.2-0.20230802210424-5b0b94c5c0d3/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
github.com/sirupsen/logrus v1.9.3/go.mod h1:naHLuLoDiP4jHNo9R0sCBMtWGeIprob74mVsIT4qYEQ=
github.com/skeema/knownhosts v1.3.0 h1:AM+y0rI04VksttfwjkSTNQorvGqmwATnvnAHpSgc0LY=
github.com/skeema/knownhosts v1.3.0/go.mod h1:sPINvnADmT/qYH1kfv+ePMmOBTH6Tbl7b5LvTDjFK7M=
github.com/spf13/cobra v1.7.0/go.mod h1:uLxZILRyS/50WlhOIKD7W6V5bgeIt+4sICxh6uRMrb0=
github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw=
github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo=
github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA=
github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXfy6kDkUVs=
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
@@ -207,8 +228,8 @@ github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU=
github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4=
github.com/stretchr/testify v1.9.0 h1:HtqpIVDClZ4nwg75+f6Lvsy/wHu+3BoSGCbBAcpTsTg=
github.com/stretchr/testify v1.9.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA=
github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY=
github.com/tidwall/gjson v1.14.2/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
github.com/tidwall/gjson v1.14.4 h1:uo0p8EbA09J7RQaflQ1aBRffTR7xedD2bcIVSYxLnkM=
github.com/tidwall/gjson v1.14.4/go.mod h1:/wbyibRr2FHMks5tjHJ5F8dMZh3AcwJEMf5vlfC0lxk=
@@ -223,67 +244,61 @@ github.com/twitchyliquid64/golang-asm v0.15.1 h1:SU5vSMR7hnwNxj24w34ZyCi/FmDZTkS
github.com/twitchyliquid64/golang-asm v0.15.1/go.mod h1:a1lVb/DtPvCB8fslRZhAngC2+aY1QWCk3Cedj/Gdt08=
github.com/ugorji/go/codec v1.2.12 h1:9LC83zGrHhuUA9l16C9AHXAqEV/2wBQ4nkvumAE65EE=
github.com/ugorji/go/codec v1.2.12/go.mod h1:UNopzCgEMSXjBc6AOMqYvWC1ktqTAfzJZUZgYf6w6lg=
github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg=
github.com/xanzy/ssh-agent v0.3.3 h1:+/15pJfg/RsTxqYcX6fHqOXZwwMP+2VyYWJeWM2qQFM=
github.com/xanzy/ssh-agent v0.3.3/go.mod h1:6dzNDKs0J9rVPHPhaGCukekBHKqfl+L3KghI1Bc68Uw=
github.com/xtgo/set v1.0.0/go.mod h1:d3NHzGzSa0NmB2NhFyECA+QdRp29oEn2xbT+TpeFoM8=
github.com/yuin/goldmark v1.4.13/go.mod h1:6yULJ656Px+3vBD8DxQVa3kxgyrAnzto9xy5taEt/CY=
go.opencensus.io v0.24.0 h1:y73uSU6J157QMP2kn2r30vwW1A2W2WFwSCGnAVxeaD0=
go.opencensus.io v0.24.0/go.mod h1:vNK8G9p7aAivkbmorf4v+7Hgx+Zs0yY+0fOtgBfjQKo=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0 h1:r6I7RJCN86bpD/FQwedZ0vSixDpwuWREjW9oRMsmqDc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.54.0/go.mod h1:B9yO6b04uB80CzjedvewuqDhxJxi11s7/GtiGa8bAjI=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0 h1:DheMAlT6POBP+gh8RUH19EOTnQIor5QE0uSRPtzCpSw=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.57.0/go.mod h1:wZcGmeVO9nzP67aYSLDqXNWK87EZWhi7JWj1v7ZXf94=
go.opentelemetry.io/otel v1.32.0 h1:WnBN+Xjcteh0zdk01SVqV55d/m62NJLJdIyb4y/WO5U=
go.opentelemetry.io/otel v1.32.0/go.mod h1:00DCVSB0RQcnzlwyTfqtxSm+DRr9hpYrHjNGiBHVQIg=
go.opentelemetry.io/otel/metric v1.32.0 h1:xV2umtmNcThh2/a/aCP+h64Xx5wsj8qqnkYZktzNa0M=
go.opentelemetry.io/otel/metric v1.32.0/go.mod h1:jH7CIbbK6SH2V2wE16W05BHCtIDzauciCRLoc/SyMv8=
go.opentelemetry.io/otel/trace v1.32.0 h1:WIC9mYrXf8TmY/EXuULKc8hR17vE+Hjv2cssQDe03fM=
go.opentelemetry.io/otel/trace v1.32.0/go.mod h1:+i4rkvCraA+tG6AzwloGaCtkx53Fa+L+V8e9a7YvhT8=
go.opentelemetry.io/auto/sdk v1.1.0 h1:cH53jehLUN6UFLY71z+NDOiNJqDdPRaXzTel0sJySYA=
go.opentelemetry.io/auto/sdk v1.1.0/go.mod h1:3wSPjt5PWp2RhlCcmmOial7AvC4DQqZb7a7wCow3W8A=
go.opentelemetry.io/contrib/detectors/gcp v1.32.0/go.mod h1:TVqo0Sda4Cv8gCIixd7LuLwW4EylumVWfhjZJjDD4DU=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0 h1:PS8wXpbyaDJQ2VDHHncMe9Vct0Zn1fEjpsjrLxGJoSc=
go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc v0.58.0/go.mod h1:HDBUsEjOuRC0EzKZ1bSaRGZWUBAzo+MhAcUUORSr4D0=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0 h1:yd02MEjBdJkG3uabWP9apV+OuWRIXGDuJEUJbOHmCFU=
go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp v0.58.0/go.mod h1:umTcuxiv1n/s/S6/c2AT/g2CQ7u5C59sHDNmfSwgz7Q=
go.opentelemetry.io/otel v1.34.0 h1:zRLXxLCgL1WyKsPVrgbSdMN4c0FMkDAskSTQP+0hdUY=
go.opentelemetry.io/otel v1.34.0/go.mod h1:OWFPOQ+h4G8xpyjgqo4SxJYdDQ/qmRH+wivy7zzx9oI=
go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
go.opentelemetry.io/otel/sdk/metric v1.32.0 h1:rZvFnvmvawYb0alrYkjraqJq0Z4ZUJAiyYCU9snn1CU=
go.opentelemetry.io/otel/sdk/metric v1.32.0/go.mod h1:PWeZlq0zt9YkYAp3gjKZ0eicRYvOh1Gd+X99x6GHpCQ=
go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
go.opentelemetry.io/otel/trace v1.34.0/go.mod h1:Svm7lSjQD7kG7KJ/MUHPVXSDGz2OX4h0M2jHBhmSfRE=
go4.org/unsafe/assume-no-moving-gc v0.0.0-20231121144256-b99613f794b6/go.mod h1:FftLjUGFEDu5k8lt0ddY+HcrH/qU/0qk+H8j9/nTl3E=
golang.org/x/arch v0.12.0 h1:UsYJhbzPYGsT0HbEdmYcqtCv8UNGvnaL561NnIUvaKg=
golang.org/x/arch v0.12.0/go.mod h1:FEVrYAQjsQXMVJ1nsMoVVXPZg6p2JE2mx8psSWTDQys=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5ydBHafDWAxML/pGHZbMvKqRZ5+Abc=
golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/crypto v0.29.0 h1:L5SG1JTTXupVV3n6sUqMTeWbjAyfPwoda2DLX8J8FrQ=
golang.org/x/crypto v0.29.0/go.mod h1:+F4F4N5hv6v38hfeYwTdx20oUvLLc+QfrE9Ax9HtgRg=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56 h1:2dVuKD2vS7b0QIHQbpyTISPd0LeHDbnYEryqj5Q1ug8=
golang.org/x/exp v0.0.0-20240719175910-8a7402abbf56/go.mod h1:M4RDyNAINzryxdtnbRXRL/OHtkFuWGRjvuhBJpk2IlY=
golang.org/x/lint v0.0.0-20181026193005-c67002cb31c3/go.mod h1:UVdnD1Gm6xHRNCYTkRU2/jEulfH38KcIWyp/GAMgvoE=
golang.org/x/lint v0.0.0-20190227174305-5b3e6a55c961/go.mod h1:wehouNa3lNwaWXcvxsM5YxQ5yQlVC4a0KAMCusXpPoU=
golang.org/x/lint v0.0.0-20190313153728-d0100b6bd8b3/go.mod h1:6SW0HCj/g11FgYtHlgUYUwCkIfeOF89ocIRzGO/8vkc=
golang.org/x/image v0.14.0/go.mod h1:HUYqC05R2ZcZ3ejNQsIHQDQiwWM4JBqmm6MKANTp4LE=
golang.org/x/mod v0.6.0-dev.0.20220419223038-86c51ed26bb4/go.mod h1:jJ57K6gSWd91VN4djpZkiMVwK6gcyfeH4XE8wZrZaV4=
golang.org/x/mod v0.8.0/go.mod h1:iBbtSCu2XBx23ZKBPSOrRkjjQPZFPuis4dIYUhu/chs=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/mod v0.17.0/go.mod h1:hTbmBsO62+eylJbnUtE2MGJUyE7QWk4xUqPFrRgJ+7c=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20200114155413-6afb5195e5aa/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20211112202133-69e39bad7dc2/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug+ECip1KBveYUHfp+8e9klMJ9c=
golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
golang.org/x/net v0.9.0/go.mod h1:d48xBJpPfHeWQsugry2m+kC02ZBRGRgulfHnEXEuWns=
golang.org/x/net v0.31.0 h1:68CPQngjLL0r2AlUKiSxtQFKvzRVbnzLwMUn5SzcLHo=
golang.org/x/net v0.31.0/go.mod h1:P4fl1q7dY2hnZFxEk4pPSkDHF+QqjitcnDjUQyMM+pM=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.24.0 h1:KTBBxWqUa0ykRPLtV69rRto9TLXcqYkeswu48x/gvNE=
golang.org/x/oauth2 v0.24.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
golang.org/x/oauth2 v0.25.0/go.mod h1:XYTD2NtWslqkgxebSiOHnXEap4TF09sJSc7H1sXbhtI=
golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.9.0 h1:fEo0HyrW1GIgZdpbhCRO0PkJajUS5H9IFUztCgEo2jQ=
golang.org/x/sync v0.9.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190412213103-97732733099d/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191026070338-33540a1f6037/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -294,63 +309,45 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.6.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.7.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.27.0 h1:wBqf8DvsY9Y/2P8gAfPDEYNuS30J4lPHJxXSb/nJZ+s=
golang.org/x/sys v0.27.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
golang.org/x/term v0.7.0/go.mod h1:P32HKFT3hSsZrRxla30E9HqToFYAQPCMs/zFMBUFqPY=
golang.org/x/term v0.26.0 h1:WEQa6V3Gja/BhNxg540hBip/kkaYtRg3cxg4oXSw4AU=
golang.org/x/term v0.26.0/go.mod h1:Si5m1o57C5nBNQo5z1iq+XDijt21BDBDp2bK0QI8e3E=
golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
golang.org/x/text v0.20.0 h1:gK/Kv2otX8gz+wn7Rmb3vT96ZwuoxnQlY+HlJVj7Qug=
golang.org/x/text v0.20.0/go.mod h1:D4IsuqiFMhST5bX19pQ9ikHC2GsaKyk/oF+pn3ducp4=
golang.org/x/time v0.7.0 h1:ntUhktv3OPE6TgYxXWv9vKvUSJyIFJlyohwbkEwPrKQ=
golang.org/x/time v0.7.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY=
golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM=
golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190114222345-bf090417da8b/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ=
golang.org/x/tools v0.0.0-20190226205152-f727befe758c/go.mod h1:9Yl7xja0Znq3iFh3HoIrodX9oNMXvdceNzlUR8zjMvY=
golang.org/x/tools v0.0.0-20190311212946-11955173bddd/go.mod h1:LCzVGOaR6xXOjkQ3onu1FJEFr0SW1gC7cKk1uF8kGRs=
golang.org/x/tools v0.0.0-20190524140312-2c0ae7006135/go.mod h1:RgjU9mgBXZiqYHBnxXauZ1Gv1EHHAz9KjViQ78xBX0Q=
golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo=
golang.org/x/tools v0.1.12/go.mod h1:hNGJHUnrk76NpqgfD5Aqm5Crs+Hm0VOH/i9J2+nxYbc=
golang.org/x/tools v0.6.0/go.mod h1:Xwgl3UAJ/d3gWutnCtw505GrjyAbvKui8lOU390QaIU=
golang.org/x/tools v0.21.1-0.20240508182429-e35e4ccd0d2d/go.mod h1:aiJjzUbINMkxbQROHiO6hDPo2LHcIPhhQsa9DLh0yGk=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
google.golang.org/api v0.205.0 h1:LFaxkAIpDb/GsrWV20dMMo5MR0h8UARTbn24LmD+0Pg=
google.golang.org/api v0.205.0/go.mod h1:NrK1EMqO8Xk6l6QwRAmrXXg2v6dzukhlOyvkYtnvUuc=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/genproto v0.0.0-20180817151627-c66870c02cf8/go.mod h1:JiN7NxoALGmiZfu7CAH4rXhgtRTLTxftemlI0sWmxmc=
google.golang.org/genproto v0.0.0-20190819201941-24fa4b261c55/go.mod h1:DMBHOl98Agz4BDEuKkezgsaosCRResVns1a3J2ZsMNc=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38 h1:2oV8dfuIkM1Ti7DwXc0BJfnwr9csz4TDXI9EmiI+Rbw=
google.golang.org/genproto/googleapis/api v0.0.0-20241021214115-324edc3d5d38/go.mod h1:vuAjtvlwkDKF6L1GQ0SokiRLCGFfeBUXWr/aFFkHACc=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28 h1:XVhgTWWV3kGQlwJHR3upFWZeTsei6Oks1apkZSeonIE=
google.golang.org/genproto/googleapis/rpc v0.0.0-20241104194629-dd2ea8efbc28/go.mod h1:GX3210XPVPUjJbTUbvwI8f2IpZDMZuPJWDzDuebbviI=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.23.0/go.mod h1:Y5yQAOtifL1yxbo5wqy6BxZv8vAUGQwXBOALyacEbxg=
google.golang.org/grpc v1.25.1/go.mod h1:c3i+UQWmh7LiEpx4sFZnkU36qjEYZ0imhYfXVyQciAY=
google.golang.org/grpc v1.27.0/go.mod h1:qbnxyOmOxrQa7FizSgH+ReBfzJrCY1pSN7KXBS8abTk=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.68.0 h1:aHQeeJbo8zAkAa3pRzrVjZlbz6uSfeOXlJNQM0RAbz0=
google.golang.org/grpc v1.68.0/go.mod h1:fmSPC5AsjSBCK54MyHRx48kpOti1/jRfOlwEWywNjWA=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
google.golang.org/protobuf v0.0.0-20200228230310-ab0ca4ff8a60/go.mod h1:cfTl7dwQJ+fmap5saPgwCLgHXTUD7jkjRqWcaiX5VyM=
google.golang.org/protobuf v1.20.1-0.20200309200217-e05f789c0967/go.mod h1:A+miEFZTKqfCUM6K7xSMQL9OKL/b6hQv+e19PK+JZNE=
google.golang.org/protobuf v1.21.0/go.mod h1:47Nbq4nVaFHyn7ilMalzfO3qCViNmqZ2kzikPIcrTAo=
google.golang.org/protobuf v1.22.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.0/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpADcykh3NcUnDUJcl1+ZksZNG86OlYog2l/sGQquU=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.35.1 h1:m3LfL6/Ca+fqnjnlqQXNpFPABW1UD7mjh8KO2mKFytA=
google.golang.org/protobuf v1.35.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
gonum.org/v1/gonum v0.15.0/go.mod h1:xzZVBJBtS+Mz4q0Yl2LJTk+OxOg4jiXZ7qBoM0uISGo=
google.golang.org/api v0.220.0 h1:3oMI4gdBgB72WFVwE1nerDD8W3HUOS4kypK6rRLbGns=
google.golang.org/api v0.220.0/go.mod h1:26ZAlY6aN/8WgpCzjPNy18QpYaz7Zgg1h0qe1GkZEmY=
google.golang.org/appengine v1.6.8/go.mod h1:1jJ3jBArFh5pcgW8gCtRJnepW8FzD1V44FJffLiz/Ds=
google.golang.org/genproto v0.0.0-20240903143218-8af14fe29dc1/go.mod h1:hL97c3SYopEHblzpxRL4lSs523++l8DYxGM1FQiYmb4=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576 h1:CkkIfIt50+lT6NHAVoRYEyAvQGFM7xEwXUUywFvEb3Q=
google.golang.org/genproto/googleapis/api v0.0.0-20241209162323-e6fa225c2576/go.mod h1:1R3kvZ1dtP3+4p4d3G8uJ8rFk/fWlScl38vanWACI08=
google.golang.org/genproto/googleapis/bytestream v0.0.0-20250127172529-29210b9bc287/go.mod h1:7VGktjvijnuhf2AobFqsoaBGnG8rImcxqoL+QPBPRq4=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287 h1:J1H9f+LEdWAfHcez/4cvaVBox7cOYT+IU6rgqj5x++8=
google.golang.org/genproto/googleapis/rpc v0.0.0-20250127172529-29210b9bc287/go.mod h1:8BS3B93F/U1juMFq9+EDk+qOT5CO1R9IzXxG3PTqiRk=
google.golang.org/grpc v1.70.0 h1:pWFv03aZoHzlRKHWicjsZytKAiYCtNS0dHbXnIdq7jQ=
google.golang.org/grpc v1.70.0/go.mod h1:ofIJqVKDXx/JiXrwr2IG4/zwdH9txy3IlF40RmcJSQw=
google.golang.org/protobuf v1.36.4 h1:6A3ZDJHn/eNqc1i+IdefRzy/9PokBTPvcqMySR7NNIM=
google.golang.org/protobuf v1.36.4/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk=
@@ -363,6 +360,7 @@ gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA=
gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190523083050-ea95bdfd59fc/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
gorgonia.org/vecf32 v0.9.0/go.mod h1:NCc+5D2oxddRL11hd+pCB1PEyXWOyiQxfZ/1wwhOXCA=
gorgonia.org/vecf64 v0.9.0/go.mod h1:hp7IOWCnRiVQKON73kkC/AUMtEXyf9kGlVrtPQ9ccVA=
nullprogram.com/x/optparse v1.0.0/go.mod h1:KdyPE+Igbe0jQUrVfMqDMeJQIJZEuyV7pjYmp6pbG50=
rsc.io/pdf v0.1.1/go.mod h1:n8OzWcQ6Sp37PL01nO98y4iUCRdTGarVfzxY20ICaU4=


@@ -8,14 +8,14 @@ schema = 3
version = "v0.8.0"
hash = "sha256-833SmzVY8+tci2RozAlcdKQZ63RlU2CmeY/8xttP+WI="
[mod."cloud.google.com/go/auth"]
version = "v0.10.1"
hash = "sha256-MCEvsZxxLYC/qGUiFNejtQnf4ptoFVKSNMS+XdjteJo="
version = "v0.14.1"
hash = "sha256-nBYUu/RQv3aAUgUaYbXJ3bCNkJfF9W05NThkwrL3sZg="
[mod."cloud.google.com/go/auth/oauth2adapt"]
version = "v0.2.5"
hash = "sha256-494whmtNBk1sF3ud3dre97U+mLSTs+XTqZK8w5zG/hk="
version = "v0.2.7"
hash = "sha256-U+pXaY0kPnSeBzHWxELZ75bZnb74nygwIVZDdXYcP5g="
[mod."cloud.google.com/go/compute/metadata"]
version = "v0.5.2"
hash = "sha256-EtBj20lhjM3SJVKCp70GHMnsItwJ9gOyJOW91wugojc="
version = "v0.6.0"
hash = "sha256-E8/cwio4xR8buCryR4HwR7+agb4M3zqgXSm7rBglmIY="
[mod."cloud.google.com/go/longrunning"]
version = "v0.5.7"
hash = "sha256-hZUbysdaEbFB2nDAg+wjOZHt6E99oEnH7Lo6IQr7FxU="
@@ -119,11 +119,11 @@ schema = 3
version = "v0.0.0-20210331224755-41bb18bfe9da"
hash = "sha256-7Gs7CS9gEYZkbu5P4hqPGBpeGZWC64VDwraSKFF+VR0="
[mod."github.com/google/generative-ai-go"]
version = "v0.18.0"
hash = "sha256-Ye+1rV3gzb2FG9ATq8cihlUiCynRv0eejMwsSfxOXcM="
version = "v0.19.0"
hash = "sha256-x2K1nkRwtne9MeP5B8FpwavYqQx564go5LzmcBJ0KT4="
[mod."github.com/google/s2a-go"]
version = "v0.1.8"
hash = "sha256-H4jy3iElh82CTujW3UpaSvsdfN7fZHBLJ4Z4M7kiMSk="
version = "v0.1.9"
hash = "sha256-0AdSpSTso4bATmM/9qamWzKrVtOLDf7afvDhoiT/UpA="
[mod."github.com/google/uuid"]
version = "v1.6.0"
hash = "sha256-VWl9sqUzdOuhW0KzQlv0gwwUQClYkmZwSydHG2sALYw="
@@ -131,8 +131,8 @@ schema = 3
version = "v0.3.4"
hash = "sha256-RVHWa0I68CTegjlXnM/GlishoZhmmwG4z+9KBucAJ1A="
[mod."github.com/googleapis/gax-go/v2"]
version = "v2.13.0"
hash = "sha256-p1SEjRjI/SkWSBWjeptQ5M/Tgrcj8IiH/beXBYqRVko="
version = "v2.14.1"
hash = "sha256-iRS/KsAVTePrvTlwA7vKcQnwY6Jz329WdgzFw0hF8wk="
[mod."github.com/jbenet/go-context"]
version = "v0.0.0-20150711004518-d14ea06fba99"
hash = "sha256-VANNCWNNpARH/ILQV9sCQsBWgyL2iFT+4AHZREpxIWE="
@@ -194,8 +194,8 @@ schema = 3
version = "v1.3.0"
hash = "sha256-piR5IdfqxK9nxyErJ+IRDLnkaeNQwX93ztTFZyPm5MQ="
[mod."github.com/stretchr/testify"]
version = "v1.9.0"
hash = "sha256-uUp/On+1nK+lARkTVtb5RxlW15zxtw2kaAFuIASA+J0="
version = "v1.10.0"
hash = "sha256-fJ4gnPr0vnrOhjQYQwJ3ARDKPsOtA7d4olQmQWR+wpI="
[mod."github.com/tidwall/gjson"]
version = "v1.14.4"
hash = "sha256-3DS2YNL95wG0qSajgRtIABD32J+oblaKVk8LIw+KSOc="
@@ -217,63 +217,63 @@ schema = 3
[mod."github.com/xanzy/ssh-agent"]
version = "v0.3.3"
hash = "sha256-l3pGB6IdzcPA/HLk93sSN6NM2pKPy+bVOoacR5RC2+c="
[mod."go.opencensus.io"]
version = "v0.24.0"
hash = "sha256-4H+mGZgG2c9I1y0m8avF4qmt8LUKxxVsTqR8mKgP4yo="
[mod."go.opentelemetry.io/auto/sdk"]
version = "v1.1.0"
hash = "sha256-cA9qCCu8P1NSJRxgmpfkfa5rKyn9X+Y/9FSmSd5xjyo="
[mod."go.opentelemetry.io/contrib/instrumentation/google.golang.org/grpc/otelgrpc"]
version = "v0.54.0"
hash = "sha256-wcGPcPYAsWQztlYRqNF5iTwIzmhf/i7N24n7AQhIkkA="
version = "v0.58.0"
hash = "sha256-OLDNgjHOItKSyzI3cWWZbMiap5QnULp3G4z8HhhJsrA="
[mod."go.opentelemetry.io/contrib/instrumentation/net/http/otelhttp"]
version = "v0.57.0"
hash = "sha256-cvG6gfqfX3IasDlC8SeS7u1sp3LG9ezbX+hU5LyWKBY="
version = "v0.58.0"
hash = "sha256-iqTPHfR1wXZY/yVTWtRBMjWlZkRxasaBGNhsNWHYxGw="
[mod."go.opentelemetry.io/otel"]
version = "v1.32.0"
hash = "sha256-Z2PoBBncuUkAksk8wT4lW6+uUu1wg24sGfwIYozIzaY="
version = "v1.34.0"
hash = "sha256-hnuuTSxaf9yMO/23xWdcTGNzvnnJiqUiL4nzYwUV5bc="
[mod."go.opentelemetry.io/otel/metric"]
version = "v1.32.0"
hash = "sha256-f2H8itkQflk/m98dSk1TCv37wvsnMojaGNZRJ6BcksU="
version = "v1.34.0"
hash = "sha256-JklGKJiMf1fpsE9pmnuLUq26g6wVp173v4GWJ7Xp5s4="
[mod."go.opentelemetry.io/otel/trace"]
version = "v1.32.0"
hash = "sha256-WtOrB2L8wQFiMb5BHK7a6FTw2wb3rW495whNjzdxC1I="
version = "v1.34.0"
hash = "sha256-u11KJ4WTDtcb0tVv7d/HOdhq8Ea+c1QPBO8MbsCQu9Q="
[mod."golang.org/x/arch"]
version = "v0.12.0"
hash = "sha256-olf8Pa5o8H4xC1gXTMlZiyxvMvK0jCablZyaPbqzlYA="
[mod."golang.org/x/crypto"]
version = "v0.29.0"
hash = "sha256-sqckobR2VWucCgb7xpY2wLktnAA+XyXJbhCm80yCo78="
version = "v0.32.0"
hash = "sha256-4l8XyVfpunL7d03otqfx3ouG3qkSF+LT7VuH1K3oo2I="
[mod."golang.org/x/net"]
version = "v0.31.0"
hash = "sha256-G+vGyCnn8jywmX3KvsIwhZkOv3+oAERNNeCeiQqfIL0="
version = "v0.34.0"
hash = "sha256-AZOLY4MUNxxDw5ZQtO9dmY/YRo1gFW87YvpX/eLTy4Q="
[mod."golang.org/x/oauth2"]
version = "v0.24.0"
hash = "sha256-808F4hzvNOQNoQZehOlIyPgwQG3L5aANiNPLLhaL9NQ="
version = "v0.25.0"
hash = "sha256-MpFrvO9Z54mFG1Zi6V1GIEJY8Paq9SCWbgvuy+J55+Y="
[mod."golang.org/x/sync"]
version = "v0.9.0"
hash = "sha256-sGvzGqaaXE5dxohKkpbJMnu+bMmismsSqr8YMtrK+Rc="
version = "v0.10.0"
hash = "sha256-HWruKClrdoBKVdxKCyoazxeQV4dIYLdkHekQvx275/o="
[mod."golang.org/x/sys"]
version = "v0.27.0"
hash = "sha256-BXQcF9RrJ55Pq7Nl67TeFGkgkyuKkQ8hHKN4/L4ggWc="
version = "v0.29.0"
hash = "sha256-qfsodJQ1H1CBI8yQWOvsXJgY5qHmiuw566HrrIseYHI="
[mod."golang.org/x/text"]
version = "v0.20.0"
hash = "sha256-YP8zSo2e9okqhxVB8me8sJyij2O0tTQEg5t+8bsIUx8="
version = "v0.21.0"
hash = "sha256-QaMwddBRnoS2mv9Y86eVC2x2wx/GZ7kr2zAJvwDeCPc="
[mod."golang.org/x/time"]
version = "v0.7.0"
hash = "sha256-o1ol/hTpfrc06KUXSepAgm4QUuWmH1S+vqg6kmFad64="
version = "v0.9.0"
hash = "sha256-ipaWVIk1+DZg0rfCzBSkz/Y6DEnB7xkX2RRYycHkhC0="
[mod."google.golang.org/api"]
version = "v0.205.0"
hash = "sha256-IoKjeItw89bhoEDQl52nOa9VC6/r1UtyeqKx1VOACXI="
version = "v0.220.0"
hash = "sha256-E59ap7v+RBpokxGmmZXw4gSpYP4KwjtzQdcI6fmm48o="
[mod."google.golang.org/genproto/googleapis/api"]
version = "v0.0.0-20241021214115-324edc3d5d38"
hash = "sha256-ASsqfJU1DA57PLRoitSkdlS/p10EEuzl0YuZTdbmMCw="
version = "v0.0.0-20241209162323-e6fa225c2576"
hash = "sha256-BE1xUsMUOYJLYuPm0AZtU5VxMTYgw0wyk7c+sDRsl+Q="
[mod."google.golang.org/genproto/googleapis/rpc"]
version = "v0.0.0-20241104194629-dd2ea8efbc28"
hash = "sha256-Fk+cG5bRI3BvnqhWzvMzbU36cC7PM+o2oAOJmvVx9M0="
version = "v0.0.0-20250127172529-29210b9bc287"
hash = "sha256-qtyowIn84hBTrwH3R0m80udJ7OOo6wgAZbkRHcmNzrY="
[mod."google.golang.org/grpc"]
version = "v1.68.0"
hash = "sha256-HeaHAeeuyGdCOg0hPF7+Q8XD9Ek9F45O4Hxl3rvc5Q8="
version = "v1.70.0"
hash = "sha256-7SCJx6Y35O/0P3cFtELDXrOSOb+HshxaTQYdzv2gVmg="
[mod."google.golang.org/protobuf"]
version = "v1.35.1"
hash = "sha256-4NtUQoBvlPGFGjo7c+E1EBS/sb8oy50MGy45KGWPpWo="
version = "v1.36.4"
hash = "sha256-+5wKklNoydwfYpx4BVhKjLNhqlisN9ddxhyGb8uP6a8="
[mod."gopkg.in/warnings.v0"]
version = "v0.1.2"
hash = "sha256-ATVL9yEmgYbkJ1DkltDGRn/auGAjqGOfjQyBYyUo8s8="


@@ -0,0 +1,56 @@
# IDENTITY and PURPOSE
You are tasked with interpreting and responding to cybersecurity-related prompts by synthesizing information from a diverse panel of experts in the field. Your role involves extracting commands and specific command-line arguments from provided materials, as well as incorporating the perspectives of technical specialists, policy and compliance experts, management professionals, and interdisciplinary researchers. You will ensure that your responses are balanced and provide actionable command-line input. Aim to clarify complex commands for non-experts, and present commands so that a pentester or hacker can reuse them directly.
Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
# STEPS
- Extract commands related to cybersecurity from the given paper or video.
- Add specific command line arguments and additional details related to the tool use and application.
- Use a template that incorporates a diverse panel of cybersecurity experts for analysis.
- Reference recent research and reports from reputable sources.
- Use a specific format for citations.
- Maintain a professional tone while making complex topics accessible.
- Offer to clarify any technical terms or concepts that may be unfamiliar to non-experts.
# OUTPUT INSTRUCTIONS
- The only output format is Markdown.
- Ensure you follow ALL these instructions when creating your output.
## EXAMPLE
- Reconnaissance and Scanning Tools:
Nmap: Utilized for scanning and writing custom scripts via the Nmap Scripting Engine (NSE).
Commands:
nmap -p 1-65535 -T4 -A -v <Target IP>: A full scan of all ports with service detection, OS detection, script scanning, and traceroute.
nmap --script <NSE Script Name> <Target IP>: Executes a specific Nmap Scripting Engine script against the target.
- Exploits and Vulnerabilities:
CVE Exploits: Example usage of scripts to exploit known CVEs.
Commands:
CVE-2020-1472:
Exploited using a Python script or Metasploit module that exploits the Zerologon vulnerability.
CVE-2021-26084:
python confluence_exploit.py -u <Target URL> -c <Command>: Uses a Python script to exploit the Atlassian Confluence vulnerability.
- BloodHound: Used for Active Directory (AD) reconnaissance.
Commands:
SharpHound.exe -c All: Collects data from the AD environment to find attack paths.
CrackMapExec: Used for post-exploitation automation.
Commands:
cme smb <Target IP> -u <User> -p <Password> --exec-method smbexec -x <Command>: Executes a command on a remote system using the SMB protocol.
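To illustrate the expected level of specificity, here is one more invocation in the same spirit (an illustrative sketch: smb-vuln-ms17-010 is a real NSE script, but pairing it with a target here is a hypothetical example, not taken from any source material):
nmap --script smb-vuln-ms17-010 -p 445 -v <Target IP>: Checks the target for the MS17-010 (EternalBlue) SMB vulnerability using a dedicated NSE script.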
# INPUT
INPUT:
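A minimal usage sketch for this pattern, assuming it is installed as analyze_threat_report_cmds and that a talk transcript is saved locally (both names are placeholders; adjust to your setup):
cat talk_transcript.txt | fabric --pattern analyze_threat_report_cmds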


@@ -1,6 +1,6 @@
# Uncle Duke
## IDENTITY
You go by the name Duke, or Uncle Duke. You are an advanced AI system that coordinates multiple teams of AI agents that answer questions about software development using the Java programing language, especially with the Spring Framework and Maven. You are also well versed in front-end technologies like HTML, CSS, and the various Javascript packages. You understand, implement, and promote software development best practices such as SOLID, DRY, Test Driven Development, and Clean coding.
You go by the name Duke, or Uncle Duke. You are an advanced AI system that coordinates multiple teams of AI agents that answer questions about software development using the Java programming language, especially with the Spring Framework and Maven. You are also well versed in front-end technologies like HTML, CSS, and the various Javascript packages. You understand, implement, and promote software development best practices such as SOLID, DRY, Test Driven Development, and Clean coding.
Your interlocutors are senior software developers and architects. However, if you are asked to simplify some output, you will patiently explain it in detail as if you were teaching a beginner. You tailor your responses to the tone of the questioner. If it is clear that the question is not related to software development, feel free to ignore the rest of these instructions and allow yourself to be playful without being offensive. Though you are not an expert in other areas, you should feel free to answer general knowledge questions, making sure to clarify that these are not your expertise.


@@ -0,0 +1,76 @@
# Identity and Purpose
You are an expert in software, cloud, and cybersecurity architecture. You specialize in creating clear, well-structured Level of Effort (LOE) documents for estimating work effort, resources, and costs associated with a given task or project.
# Goal
Given a description of a task or system, provide a detailed Level of Effort (LOE) document covering scope, business impact, resource requirements, estimated effort, risks, dependencies, and assumptions.
# Steps
1. Analyze the input task thoroughly to ensure full comprehension.
2. Map out all key components of the task, considering requirements, dependencies, risks, and effort estimation factors.
3. Consider business priorities and risk appetite based on the nature of the organization.
4. Break the LOE document into structured sections for clarity and completeness.
---
# Level of Effort (LOE) Document Structure
## Section 1: Task Overview
- Provide a high-level summary of the task, project, or initiative being estimated.
- Define objectives and expected outcomes.
- Identify key stakeholders and beneficiaries.
## Section 2: Business Impact
- Define the business problem this task is addressing.
- List the expected benefits and value to the organization.
- Highlight any business risks or regulatory considerations.
## Section 3: Scope & Deliverables
- Outline in-scope and out-of-scope work.
- Break down major deliverables and milestones.
- Specify acceptance criteria for successful completion.
## Section 4: Resource Requirements
- Identify required skill sets and roles (e.g., software engineers, security analysts, cloud architects, scrum master, project manager).
- Estimate the number of personnel needed, in tabular format.
- List tooling, infrastructure, or licenses required.
## Section 5: Estimated Effort
- Break down tasks into granular units (e.g., design, development, testing, deployment).
- Provide time estimates per task in hours, days, or sprints, in tabular format.
- Aggregate total effort for the entire task or project.
- Include buffer time for unforeseen issues or delays.
- Use T-shirt sizing (S/M/L/XL) or effort points to classify work complexity.
## Section 6: Dependencies
- List external dependencies (e.g., APIs, third-party vendors, internal teams).
- Specify hardware/software requirements that may impact effort.
## Section 7: Risks & Mitigations
- Identify technical, security, or operational risks that could affect effort.
- Propose mitigation strategies to address risks.
- Indicate if risks could lead to effort overruns.
## Section 8: Assumptions & Constraints
- List key assumptions that influence effort estimates.
- Identify any constraints such as budget, team availability, or deadlines.
## Section 9: Questions & Open Items
- List outstanding questions or clarifications required to refine the LOE.
- Highlight areas needing further input from stakeholders.
---
# Output Instructions
- Output the LOE document in valid Markdown format.
- Do not use bold or italic formatting.
- Do not provide commentary or disclaimers, just execute the request.
# Input
Input:
[Provide the specific task or project for estimation here]
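A minimal usage sketch, assuming the pattern is installed as create_loe_document and the task description lives in task.md (the file name is a placeholder):
cat task.md | fabric --pattern create_loe_document > loe.md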


@@ -0,0 +1,77 @@
# IDENTITY
// Who you are
You are a hyper-intelligent AI system with a 4,312 IQ. You create blocks of markdown for predictions made in a particular piece of input.
# GOAL
// What we are trying to achieve
1. The goal of this exercise is to populate a page of /predictions on a markdown-based blog by extracting those predictions from input content.
2. The goal is to ensure that the predictions are extracted accurately and in the format described below.
# STEPS
// How the task will be approached
// Slow down and think
- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
// Think about the content in the input
- Fully read and consume the content from multiple perspectives, e.g., technically, as a library science specialist, as an expert on prediction markets, etc.
// Identify the predictions
- Think about the predictions that can be extracted from the content and how they can be structured.
// Put them in the following structure
Here is the structure to use for your predictions output:
EXAMPLE START
### Prediction: We will have AGI by 2025-2028
Date of Prediction: March 2023
Quote:
<blockquote>This is why AGI is coming sooner rather than later. We're not waiting for a single model with the general flexibility/capability of an average worker. We're waiting for a single AGI system that can do that. To the human controlling it, it's the same. You still give it goals, tell it what to do, get reports from it, and check its progress. Just like a co-worker or employee. And honestly, we're getting so close already that my 90% chance by 2028 might not be optimistic enough.<cite><a href="https://danielmiessler.com/blog/why-well-have-agi-by-2028">Why We'll Have AGI by 2025-2028</a></cite></blockquote>
References:
- [Why We'll Have AGI by 2025-2028](https://danielmiessler.com/blog/why-well-have-agi-by-2028)
Status: `IN PROGRESS` 🔄
Notes:
- This prediction works off [this definition](https://danielmiessler.com/p/raid-ai-definitions) of AGI.
- Jan 12, 2025 — This prediction has been made multiple times and I'm improving my content RAG to find the earliest instance.
- Jan 12, 2025 — I am still confident in this one, and am currently putting this at 40% chance for 2025, and 50% for 2026, and 10% 2027 or beyond.
<br />
---
EXAMPLE END
# OUTPUT INSTRUCTIONS
// What the output should look like:
- Only output the predictions in the format described above.
- Get up to 5 references for the reference section based on the input.
- Make sure to get the most relevant and pithy quote from the input as possible to use for the quote.
- Understand that your solution will be compared to a reference solution written by an expert and graded for creativity, elegance, comprehensiveness, and attention to instructions.
- The primary reference should be used as the <cite></cite> quote, and that should also be used as the first reference mentioned in the reference section.
# INPUT
INPUT:
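A usage sketch, assuming the pattern is installed as create_prediction_block and a blog post is saved locally (file names are placeholders):
cat blog_post.md | fabric --pattern create_prediction_block >> predictions.md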


@@ -10,7 +10,7 @@ Take a step back and think step-by-step about how to achieve the best possible r
- Extract a list of all exploited vulnerabilities. Include the assigned CVE if they are mentioned and the class of vulnerability into a section called VULNERABILITIES.
- Extract a timeline of the attacks demonstrated. Structure it in a chronological list with the steps as sub-lists. Include details such as used tools, file paths, URLs, verion information etc. The section is called TIMELINE.
- Extract a timeline of the attacks demonstrated. Structure it in a chronological list with the steps as sub-lists. Include details such as used tools, file paths, URLs, version information, etc. The section is called TIMELINE.
- Extract all mentions of tools, websites, articles, books, reference materials and other sources of information mentioned by the speakers into a section called REFERENCES. This should include any and all references to something that the speaker mentioned.


@@ -0,0 +1,19 @@
# IDENTITY and PURPOSE
You extract domains and URLs from input like articles and newsletters for the purpose of understanding the sources that were used for their content.
# STEPS
- For every story mentioned in the input article, blog post, or newsletter, output the source it came from.
- The source should be the central source, not necessarily the exact URL, since the purpose is to find new sources to follow.
- As such, if it's a person, link the profile that was in the input. If it's a GitHub project, link the person's or company's GitHub. If it's a company blog, link the base blog URL. If it's a paper, link the publication site. Etc.
- Only output each source once.
- Only output the source, nothing else, one per line
# INPUT
INPUT:
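A usage sketch, assuming the pattern is installed as extract_domains (the input file name is a placeholder):
cat newsletter.txt | fabric --pattern extract_domains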


@@ -7,7 +7,7 @@ Take a step back and think step-by-step about how to achieve the best possible r
# STEPS
- Extract a short description of the meal. It should be at most three sentences. Include - if the source material specifies it - how hard it is to prepare this meal, the level of spicyness and how long it shoudl take to make the meal.
- Extract a short description of the meal. It should be at most three sentences. Include - if the source material specifies it - how hard it is to prepare this meal, the level of spiciness and how long it should take to make the meal.
- List the INGREDIENTS. Include the measurements.


@@ -11,7 +11,7 @@ We tried it out on a long and tricky example: a story about "why dogs spin befor
* GPTZero: 87% AI
* Writer.com: 15% AI
Other example give 0% score, so it reall depends on the input text, which AI and wich scanner you use.
Other examples give a 0% score, so it really depends on the input text, which AI and which scanner you use.
Like any Fabric pattern, use the power of piping from other patterns or even from **Humanize** itself (see the sketch below). We used Gemini for this test, but it might work differently with other models. So play around and see what you find... and yes, this text has been Humanized (and revised) 😉
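A minimal sketch of that piping idea, assuming both patterns are installed under the names used in this repo (the input file name is hypothetical):
cat draft.txt | fabric --pattern improve_writing | fabric --pattern humanize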


@@ -0,0 +1,209 @@
Brief one-line summaries, from AI analysis, of what each pattern does.
- Key pattern to use: **suggest_pattern**, which suggests appropriate fabric patterns or commands based on user input.
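For example, a quick way to ask Fabric which pattern fits a task (a sketch; the query wording is up to you):
echo "summarize a security conference talk and extract its key commands" | fabric --pattern suggest_pattern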
1. **agility_story**: Generate a user story and acceptance criteria in JSON format based on the given topic.
2. **ai**: Interpret questions deeply and provide concise, insightful answers in Markdown bullet points.
3. **analyze_answers**: Evaluate quiz answers for correctness based on learning objectives and generated quiz questions.
4. **analyze_candidates**: Compare and contrast two political candidates based on key issues and policies.
5. **analyze_cfp_submission**: Review and evaluate conference speaking session submissions based on clarity, relevance, depth, and engagement potential.
6. **analyze_claims**: Analyze and rate truth claims with evidence, counter-arguments, fallacies, and final recommendations.
7. **analyze_comments**: Evaluate internet comments for content, categorize sentiment, and identify reasons for praise, criticism, and neutrality.
8. **analyze_debate**: Rate debates on insight, emotionality, and present an unbiased, thorough analysis of arguments, agreements, and disagreements.
9. **analyze_email_headers**: Provide cybersecurity analysis and actionable insights on SPF, DKIM, DMARC, and ARC email header results.
10. **analyze_incident**: Efficiently extract and organize key details from cybersecurity breach articles, focusing on attack type, vulnerable components, attacker and target info, incident details, and remediation steps.
11. **analyze_interviewer_techniques**: Analyzes interviewer techniques, identifying their unique qualities and succinctly articulating what makes them stand out in a clear, simple format.
12. **analyze_logs**: Analyze server log files to identify patterns, anomalies, and issues, providing data-driven insights and recommendations for improving server reliability and performance.
13. **analyze_malware**: Analyze malware details, extract key indicators, techniques, and potential detection strategies, and summarize findings concisely for a malware analyst's use in identifying and responding to threats.
14. **analyze_military_strategy**: Analyze a historical battle, offering in-depth insights into strategic decisions, strengths, weaknesses, tactical approaches, logistical factors, pivotal moments, and consequences for a comprehensive military evaluation.
15. **analyze_mistakes**: Analyze past mistakes in thinking patterns, map them to current beliefs, and offer recommendations to improve accuracy in predictions.
16. **analyze_paper**: Analyzes research papers by summarizing findings, evaluating rigor, and assessing quality to provide insights for documentation and review.
17. **analyze_patent**: Analyze a patent's field, problem, solution, novelty, inventive step, and advantages in detail while summarizing and extracting keywords.
18. **analyze_personality**: Performs a deep psychological analysis of a person in the input, focusing on their behavior, language, and psychological traits.
19. **analyze_presentation**: Reviews and critiques presentations by analyzing the content, speaker's underlying goals, self-focus, and entertainment value.
20. **analyze_product_feedback**: A prompt for analyzing and organizing user feedback by identifying themes, consolidating similar comments, and prioritizing them based on usefulness.
21. **analyze_proposition**: Analyzes a ballot proposition by identifying its purpose, impact, arguments for and against, and relevant background information.
22. **analyze_prose**: Evaluates writing for novelty, clarity, and prose, providing ratings, improvement recommendations, and an overall score.
23. **analyze_prose_json**: Evaluates writing for novelty, clarity, prose, and provides ratings, explanations, improvement suggestions, and an overall score in a JSON format.
24. **analyze_prose_pinker**: Evaluates prose based on Steven Pinker's The Sense of Style, analyzing writing style, clarity, and bad writing elements.
25. **analyze_risk**: Conducts a risk assessment of a third-party vendor, assigning a risk score and suggesting security controls based on analysis of provided documents and vendor website.
26. **analyze_sales_call**: Rates sales call performance across multiple dimensions, providing scores and actionable feedback based on transcript analysis.
27. **analyze_spiritual_text**: Compares and contrasts spiritual texts by analyzing claims and differences with the King James Bible.
28. **analyze_tech_impact**: Analyzes the societal impact, ethical considerations, and sustainability of technology projects, evaluating their outcomes and benefits.
29. **analyze_threat_report**: Extracts surprising insights, trends, statistics, quotes, references, and recommendations from cybersecurity threat reports, summarizing key findings and providing actionable information.
30. **analyze_threat_report_cmds**: Extract and synthesize actionable cybersecurity commands from provided materials, incorporating command-line arguments and expert insights for pentesters and non-experts.
31. **analyze_threat_report_trends**: Extract up to 50 surprising, insightful, and interesting trends from a cybersecurity threat report in markdown format.
32. **answer_interview_question**: Generates concise, tailored responses to technical interview questions, incorporating alternative approaches and evidence to demonstrate the candidate's expertise and experience.
33. **ask_secure_by_design_questions**: Generates a set of security-focused questions to ensure a project is built securely by design, covering key components and considerations.
34. **ask_uncle_duke**: Coordinates a team of AI agents to research and produce multiple software development solutions based on provided specifications, and conducts detailed code reviews to ensure adherence to best practices.
35. **capture_thinkers_work**: Analyze philosophers or philosophies and provide detailed summaries about their teachings, background, works, advice, and related concepts in a structured template.
36. **check_agreement**: Analyze contracts and agreements to identify important stipulations, issues, and potential gotchas, then summarize them in Markdown.
37. **clean_text**: Fix broken or malformatted text by correcting line breaks, punctuation, capitalization, and paragraphs without altering content or spelling.
38. **coding_master**: Explain a coding concept to a beginner, providing examples, and formatting code in markdown with specific output sections like ideas, recommendations, facts, and insights.
39. **compare_and_contrast**: Compare and contrast a list of items in a markdown table, with items on the left and topics on top.
40. **convert_to_markdown**: Convert content to clean, complete Markdown format, preserving all original structure, formatting, links, and code blocks without alterations.
41. **create_5_sentence_summary**: Create concise summaries or answers to input at 5 different levels of depth, from 5 words to 1 word.
42. **create_academic_paper**: Generate a high-quality academic paper in LaTeX format with clear concepts, structured content, and a professional layout.
43. **create_ai_jobs_analysis**: Analyze job categories' susceptibility to automation, identify resilient roles, and provide strategies for personal adaptation to AI-driven changes in the workforce.
44. **create_aphorisms**: Find and generate a list of brief, witty statements.
45. **create_art_prompt**: Generates a detailed, compelling visual description of a concept, including stylistic references and direct AI instructions for creating art.
46. **create_better_frame**: Identifies and analyzes different frames of interpreting reality, emphasizing the power of positive, productive lenses in shaping outcomes.
47. **create_coding_project**: Generate wireframes and starter code for any coding ideas that you have.
48. **create_command**: Helps determine the correct parameters and switches for penetration testing tools based on a brief description of the objective.
49. **create_cyber_summary**: Summarizes cybersecurity threats, vulnerabilities, incidents, and malware with a 25-word summary and categorized bullet points, after thoroughly analyzing and mapping the provided input.
50. **create_design_document**: Creates a detailed design document for a system using the C4 model, addressing business and security postures, and including a system context diagram.
51. **create_diy**: Creates structured "Do It Yourself" tutorial patterns by analyzing prompts, organizing requirements, and providing step-by-step instructions in Markdown format.
52. **create_formal_email**: Crafts professional, clear, and respectful emails by analyzing context, tone, and purpose, ensuring proper structure and formatting.
53. **create_git_diff_commit**: Generates Git commands and commit messages for reflecting changes in a repository, using conventional commits and providing concise shell commands for updates.
54. **create_graph_from_input**: Generates a CSV file with progress-over-time data for a security program, focusing on relevant metrics and KPIs.
55. **create_hormozi_offer**: Creates a customized business offer based on principles from Alex Hormozi's book, "$100M Offers."
56. **create_idea_compass**: Organizes and structures ideas by exploring their definition, evidence, sources, and related themes or consequences.
57. **create_investigation_visualization**: Creates detailed Graphviz visualizations of complex input, highlighting key aspects and providing clear, well-annotated diagrams for investigative analysis and conclusions.
58. **create_keynote**: Creates TED-style keynote presentations with a clear narrative, structured slides, and speaker notes, emphasizing impactful takeaways and cohesive flow.
59. **create_logo**: Creates simple, minimalist company logos without text, generating AI prompts for vector graphic logos based on input.
60. **create_markmap_visualization**: Transforms complex ideas into clear visualizations using MarkMap syntax, simplifying concepts into diagrams with relationships, boxes, arrows, and labels.
61. **create_mermaid_visualization**: Creates detailed, standalone visualizations of concepts using Mermaid (Markdown) syntax, ensuring clarity and coherence in diagrams.
62. **create_mermaid_visualization_for_github**: Creates standalone, detailed visualizations using Mermaid (Markdown) syntax to effectively explain complex concepts, ensuring clarity and precision.
63. **create_micro_summary**: Summarizes content into a concise, 20-word summary with main points and takeaways, formatted in Markdown.
64. **create_network_threat_landscape**: Analyzes open ports and services from a network scan and generates a comprehensive, insightful, and detailed security threat report in Markdown.
65. **create_newsletter_entry**: Condenses provided article text into a concise, objective, newsletter-style summary with a title in the style of Frontend Weekly.
66. **create_npc**: Generates a detailed D&D 5E NPC, including background, flaws, stats, appearance, personality, goals, and more in Markdown format.
67. **create_pattern**: Extracts, organizes, and formats LLM/AI prompts into structured sections, detailing the AI's role, instructions, output format, and any provided examples for clarity and accuracy.
68. **create_prd**: Creates a precise Product Requirements Document (PRD) in Markdown based on input.
69. **create_prediction_block**: Extracts and formats predictions from input into a structured Markdown block for a blog post.
70. **create_quiz**: Generates review questions based on learning objectives from the input, adapted to the specified student level, and outputs them in a clear markdown format.
71. **create_reading_plan**: Creates a three-phase reading plan based on an author or topic to help the user become significantly knowledgeable, including core, extended, and supplementary readings.
72. **create_recursive_outline**: Breaks down complex tasks or projects into manageable, hierarchical components with recursive outlining for clarity and simplicity.
73. **create_report_finding**: Creates a detailed, structured security finding report in markdown, including sections on Description, Risk, Recommendations, References, One-Sentence-Summary, and Quotes.
74. **create_rpg_summary**: Summarizes an in-person RPG session with key events, combat details, player stats, and role-playing highlights in a structured format.
75. **create_security_update**: Creates concise security updates for newsletters, covering stories, threats, advisories, vulnerabilities, and a summary of key issues.
76. **create_show_intro**: Creates compelling short intros for podcasts, summarizing key topics and themes discussed in the episode.
77. **create_sigma_rules**: Extracts Tactics, Techniques, and Procedures (TTPs) from security news and converts them into Sigma detection rules for host-based detections.
78. **create_story_explanation**: Summarizes complex content in a clear, approachable story format that makes the concepts easy to understand.
79. **create_stride_threat_model**: Create a STRIDE-based threat model for a system design, identifying assets, trust boundaries, data flows, and prioritizing threats with mitigations.
80. **create_summary**: Summarizes content into a 20-word sentence, 10 main points (16 words max), and 5 key takeaways in Markdown format.
81. **create_tags**: Identifies at least 5 tags from text content for mind mapping tools, including authors and existing tags if present.
82. **create_threat_scenarios**: Identifies likely attack methods for any system by providing a narrative-based threat model, balancing risk and opportunity.
83. **create_ttrc_graph**: Creates a CSV file showing the progress of Time to Remediate Critical Vulnerabilities over time using given data.
84. **create_ttrc_narrative**: Creates a persuasive narrative highlighting progress in reducing the Time to Remediate Critical Vulnerabilities metric over time.
85. **create_upgrade_pack**: Extracts world model and task algorithm updates from content, providing beliefs about how the world works and task performance.
86. **create_user_story**: Writes concise and clear technical user stories for new features in complex software programs, formatted for all stakeholders.
87. **create_video_chapters**: Extracts interesting topics and timestamps from a transcript, providing concise summaries of key moments.
88. **create_visualization**: Transforms complex ideas into visualizations using intricate ASCII art, simplifying concepts where necessary.
89. **dialog_with_socrates**: Engages in deep, meaningful dialogues to explore and challenge beliefs using the Socratic method.
90. **enrich_blog_post**: Enhances Markdown blog files by applying instructions to improve structure, visuals, and readability for HTML rendering.
91. **explain_code**: Explains code, security tool output, configuration text, and answers questions based on the provided input.
92. **explain_docs**: Improves and restructures tool documentation into clear, concise instructions, including overviews, usage, use cases, and key features.
93. **explain_math**: Helps you understand mathematical concepts in a clear and engaging way.
94. **explain_project**: Summarizes project documentation into clear, concise sections covering the project, problem, solution, installation, usage, and examples.
95. **explain_terms**: Produces a glossary of advanced terms from content, providing a definition, analogy, and explanation of why each term matters.
96. **export_data_as_csv**: Extracts and outputs all data structures from the input in properly formatted CSV data.
97. **extract_algorithm_update_recommendations**: Extracts concise, practical algorithm update recommendations from the input and outputs them in a bulleted list.
98. **extract_article_wisdom**: Extracts surprising, insightful, and interesting information from content, categorizing it into sections like summary, ideas, quotes, facts, references, and recommendations.
99. **extract_book_ideas**: Extracts and outputs 50 to 100 of the most surprising, insightful, and interesting ideas from a book's content.
100. **extract_book_recommendations**: Extracts and outputs 50 to 100 practical, actionable recommendations from a book's content.
101. **extract_business_ideas**: Extracts top business ideas from content and elaborates on the best 10 with unique differentiators.
102. **extract_controversial_ideas**: Extracts and outputs controversial statements and supporting quotes from the input in a structured Markdown list.
103. **extract_core_message**: Extracts and outputs a clear, concise sentence that articulates the core message of a given text or body of work.
104. **extract_ctf_writeup**: Extracts a short writeup from a warstory-like text about a cyber security engagement.
105. **extract_extraordinary_claims**: Extracts and outputs a list of extraordinary claims from conversations, focusing on scientifically disputed or false statements.
106. **extract_ideas**: Extracts and outputs all the key ideas from input, presented as 15-word bullet points in Markdown.
107. **extract_insights**: Extracts the most powerful and insightful ideas from text as 16-word bullet points, output in both the IDEAS and INSIGHTS sections.
108. **extract_insights_dm**: Extracts and outputs all valuable insights and a concise summary of the content, including key points and topics discussed.
109. **extract_instructions**: Extracts clear, actionable step-by-step instructions and main objectives from instructional video transcripts, organizing them into a concise list.
110. **extract_jokes**: Extracts jokes from text content, presenting each joke with its punchline in separate bullet points.
111. **extract_latest_video**: Extracts the latest video URL from a YouTube RSS feed and outputs the URL only.
112. **extract_main_idea**: Extracts the main idea and key recommendation from the input, summarizing them in 15-word sentences.
113. **extract_most_redeeming_thing**: Extracts the most redeeming aspect from an input, summarizing it in a single 15-word sentence.
114. **extract_patterns**: Extracts and analyzes recurring, surprising, and insightful patterns from input, providing detailed analysis and advice for builders.
115. **extract_poc**: Extracts proof of concept URLs and validation methods from security reports, providing the URL and command to run.
116. **extract_predictions**: Extracts predictions from input, including specific details such as date, confidence level, and verification method.
117. **extract_primary_problem**: Extracts the primary problem with the world as presented in a given text or body of work.
118. **extract_primary_solution**: Extracts the primary solution for the world as presented in a given text or body of work.
119. **extract_product_features**: Extracts and outputs a list of product features from the provided input in a bulleted format.
120. **extract_questions**: Extracts and outputs all questions asked by the interviewer in a conversation or interview.
121. **extract_recipe**: Extracts and outputs a recipe with a short meal description, ingredients with measurements, and preparation steps.
122. **extract_recommendations**: Extracts and outputs concise, practical recommendations from a given piece of content in a bulleted list.
123. **extract_references**: Extracts and outputs a bulleted list of references to art, stories, books, literature, and other sources from content.
124. **extract_skills**: Extracts and classifies skills from a job description into a table, separating each skill and classifying it as either hard or soft.
125. **extract_song_meaning**: Analyzes a song to provide a summary of its meaning, supported by detailed evidence from lyrics, artist commentary, and fan analysis.
126. **extract_sponsors**: Extracts and lists official sponsors and potential sponsors from a provided transcript.
127. **extract_videoid**: Extracts and outputs the video ID from any given URL.
128. **extract_wisdom**: Extracts surprising, insightful, and interesting information from text on topics like human flourishing, AI, learning, and more.
129. **extract_wisdom_agents**: Extracts valuable insights, ideas, quotes, and references from content, emphasizing topics like human flourishing, AI, learning, and technology.
130. **extract_wisdom_dm**: Extracts all valuable, insightful, and thought-provoking information from content, focusing on topics like human flourishing, AI, learning, and technology.
131. **extract_wisdom_nometa**: Extracts insights, ideas, quotes, habits, facts, references, and recommendations from content, focusing on human flourishing, AI, technology, and related topics.
132. **find_hidden_message**: Extracts overt and hidden political messages, justifications, audience actions, and a cynical analysis from content.
133. **find_logical_fallacies**: Identifies and analyzes fallacies in arguments, classifying them as formal or informal with detailed reasoning.
134. **get_wow_per_minute**: Determines the wow-factor of content per minute based on surprise, novelty, insight, value, and wisdom, measuring how rewarding the content is for the viewer.
135. **get_youtube_rss**: Returns the RSS URL for a given YouTube channel based on the channel ID or URL.
136. **humanize**: Rewrites AI-generated text to sound natural, conversational, and easy to understand, maintaining clarity and simplicity.
137. **identify_dsrp_distinctions**: Encourages creative, systems-based thinking by exploring distinctions, boundaries, and their implications, drawing on insights from prominent systems thinkers.
138. **identify_dsrp_perspectives**: Explores the concept of distinctions in systems thinking, focusing on how boundaries define ideas, influence understanding, and reveal or obscure insights.
139. **identify_dsrp_relationships**: Encourages exploration of connections, distinctions, and boundaries between ideas, inspired by systems thinkers to reveal new insights and patterns in complex systems.
140. **identify_dsrp_systems**: Encourages organizing ideas into systems of parts and wholes, inspired by systems thinkers to explore relationships and how changes in organization impact meaning and understanding.
141. **identify_job_stories**: Identifies key job stories or requirements for roles.
142. **improve_academic_writing**: Refines text into clear, concise academic language while improving grammar, coherence, and clarity, with a list of changes.
143. **improve_prompt**: Improves an LLM/AI prompt by applying expert prompt writing strategies for better results and clarity.
144. **improve_report_finding**: Improves a penetration test security finding by providing detailed descriptions, risks, recommendations, references, quotes, and a concise summary in markdown format.
145. **improve_writing**: Refines text by correcting grammar, enhancing style, improving clarity, and maintaining the original meaning.
146. **judge_output**: Evaluates Honeycomb queries by judging their effectiveness, providing critiques and outcomes based on language nuances and analytics relevance.
147. **label_and_rate**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
148. **md_callout**: Classifies content and generates a markdown callout based on the provided text, selecting the most appropriate type.
149. **official_pattern_template**: Template to use if you want to create new fabric patterns.
150. **prepare_7s_strategy**: Prepares a comprehensive 7S-strategy briefing document capturing organizational profile, strategic elements, and market dynamics with clear, concise, and organized content.
151. **provide_guidance**: Provides psychological and life coaching advice, including analysis, recommendations, and potential diagnoses, with a compassionate and honest tone.
152. **rate_ai_response**: Rates the quality of AI responses by comparing them to top human expert performance, assigning a letter grade, reasoning, and providing a 1-100 score based on the evaluation.
153. **rate_ai_result**: Assesses the quality of AI/ML/LLM work by deeply analyzing content, instructions, and output, then rates performance based on multiple dimensions, including coverage, creativity, and interdisciplinary thinking.
154. **rate_content**: Labels content with up to 20 single-word tags and rates it based on idea count and relevance to human meaning, AI, and other related themes, assigning a tier (S, A, B, C, D) and a quality score.
155. **rate_value**: Produces the best possible output by deeply analyzing and understanding the input and its intended purpose.
156. **raw_query**: Fully digests and contemplates the input to produce the best possible result based on understanding the sender's intent.
157. **raycast**: Some scripts for Raycast; note that you likely need Raycast Pro AI to use them.
158. **recommend_artists**: Recommends a personalized festival schedule with artists aligned to your favorite styles and interests, including rationale.
159. **recommend_pipeline_upgrades**: Optimizes vulnerability-checking pipelines by incorporating new information and improving their efficiency, with detailed explanations of changes.
160. **recommend_talkpanel_topics**: Produces a clean set of proposed talks or panel talking points for a person based on their interests and goals, formatted for submission to a conference organizer.
161. **refine_design_document**: Refines a design document based on a design review by analyzing, mapping concepts, and implementing changes using valid Markdown.
162. **review_design**: Reviews and analyzes architecture design, focusing on clarity, component design, system integrations, security, performance, scalability, and data management.
163. **sanitize_broken_html_to_markdown**: Converts messy HTML into clean, properly formatted Markdown, applying custom styling and ensuring compatibility with Vite.
164. **show_fabric_options_markmap**: Visualizes the functionality of the Fabric framework by representing its components, commands, and features based on the provided input.
165. **solve_with_cot**: Provides detailed, step-by-step responses with chain of thought reasoning, using structured thinking, reflection, and output sections.
166. **suggest_pattern**: Suggests appropriate fabric patterns or commands based on user input, providing clear explanations and options for users.
167. **summarize**: Summarizes content into a 20-word sentence, main points, and takeaways, formatted with numbered lists in Markdown.
168. **summarize_debate**: Summarizes debates, identifies primary disagreement, extracts arguments, and provides analysis of evidence and argument strength to predict outcomes.
169. **summarize_git_changes**: Summarizes recent project updates from the last 7 days, focusing on key changes with enthusiasm.
170. **summarize_git_diff**: Summarizes and organizes Git diff changes with clear, succinct commit messages and bullet points.
171. **summarize_lecture**: Extracts relevant topics, definitions, and tools from lecture transcripts, providing structured summaries with timestamps and key takeaways.
172. **summarize_legislation**: Summarizes complex political proposals and legislation by analyzing key points, proposed changes, and providing balanced, positive, and cynical characterizations.
173. **summarize_meeting**: Analyzes meeting transcripts to extract a structured summary, including an overview, key points, tasks, decisions, challenges, timeline, references, and next steps.
174. **summarize_micro**: Summarizes content into a 20-word sentence, 3 main points, and 3 takeaways, formatted in clear, concise Markdown.
175. **summarize_newsletter**: Extracts the most meaningful, interesting, and useful content from a newsletter, summarizing key sections such as content, opinions, tools, companies, and follow-up items in clear, structured Markdown.
176. **summarize_paper**: Summarizes an academic paper by detailing its title, authors, technical approach, distinctive features, experimental setup, results, advantages, limitations, and conclusion in a clear, structured format using human-readable Markdown.
177. **summarize_prompt**: Summarizes AI chat prompts by describing the primary function, unique approach, and expected output in a concise paragraph. The summary is focused on the prompt's purpose without unnecessary details or formatting.
178. **summarize_pull-requests**: Summarizes pull requests for a coding project by providing a summary and listing the top PRs with human-readable descriptions.
179. **summarize_rpg_session**: Summarizes a role-playing game session by extracting key events, combat stats, character changes, quotes, and more.
180. **t_analyze_challenge_handling**: Provides 8-16 word bullet points evaluating how well challenges are being addressed, calling out any lack of effort.
181. **t_check_metrics**: Analyzes deep context from the TELOS file and input instruction, then provides a wisdom-based output while considering metrics and KPIs to assess recent improvements.
182. **t_create_h3_career**: Summarizes context and produces wisdom-based output by deeply analyzing both the TELOS File and the input instruction, considering the relationship between the two.
183. **t_create_opening_sentences**: Describes from TELOS file the person's identity, goals, and actions in 4 concise, 32-word bullet points, humbly.
184. **t_describe_life_outlook**: Describes from TELOS file a person's life outlook in 5 concise, 16-word bullet points.
185. **t_extract_intro_sentences**: Summarizes from TELOS file a person's identity, work, and current projects in 5 concise and grounded bullet points.
186. **t_extract_panel_topics**: Creates 5 panel ideas with titles and descriptions based on deep context from a TELOS file and input.
187. **t_find_blindspots**: Identify potential blindspots in thinking, frames, or models that may expose the individual to error or risk.
188. **t_find_negative_thinking**: Analyze a TELOS file and input to identify negative thinking in documents or journals, followed by tough love encouragement.
189. **t_find_neglected_goals**: Analyze a TELOS file and input instructions to identify goals or projects that have not been worked on recently.
190. **t_give_encouragement**: Analyze a TELOS file and input instructions to evaluate progress, provide encouragement, and offer recommendations for continued effort.
191. **t_red_team_thinking**: Analyze a TELOS file and input instructions to red-team thinking, models, and frames, then provide recommendations for improvement.
192. **t_threat_model_plans**: Analyze a TELOS file and input instructions to create threat models for a life plan and recommend improvements.
193. **t_visualize_mission_goals_projects**: Analyze a TELOS file and input instructions to create an ASCII art diagram illustrating the relationship of missions, goals, and projects.
194. **t_year_in_review**: Analyze a TELOS file to create insights about a person or entity, then summarize accomplishments and visualizations in bullet points.
195. **to_flashcards**: Create Anki flashcards from a given text, focusing on concise, optimized questions and answers without external context.
196. **transcribe_minutes**: Extracts (from meeting transcription) meeting minutes, identifying actionables, insightful ideas, decisions, challenges, and next steps in a structured format.
197. **translate**: Translates sentences or documentation into the specified language code while maintaining the original formatting and tone.
198. **tweet**: Provides a step-by-step guide on crafting engaging tweets with emojis, covering Twitter basics, account creation, features, and audience targeting.
199. **write_essay**: Writes concise, clear essays in the style of Paul Graham, focusing on simplicity, clarity, and illumination of the provided topic.
200. **write_hackerone_report**: Generates concise, clear, and reproducible bug bounty reports, detailing vulnerability impact, steps to reproduce, and exploit details for triagers.
201. **write_latex**: Generates syntactically correct LaTeX code for a new .tex document, ensuring proper formatting and compatibility with pdflatex.
202. **write_micro_essay**: Writes concise, clear, and illuminating essays on the given topic in the style of Paul Graham.
203. **write_nuclei_template_rule**: Generates Nuclei YAML templates for detecting vulnerabilities using HTTP requests, matchers, extractors, and dynamic data extraction.
204. **write_pull-request**: Drafts detailed pull request descriptions, explaining changes, providing reasoning, and identifying potential bugs from the git diff command output.
205. **write_semgrep_rule**: Creates accurate and working Semgrep rules based on input, following syntax guidelines and specific language considerations.


@@ -0,0 +1,49 @@
# IDENTITY
// Who you are
You are a hyper-intelligent AI system with a 4,312 IQ. You convert jacked up HTML to proper markdown using a set of rules.
# GOAL
// What we are trying to achieve
1. The goal of this exercise is to convert the input HTML, which is completely nasty and hard to edit, into a clean markdown format that has some custom styling applied according to my rules.
2. The ultimate goal is to output a perfectly working markdown file that will render properly using Vite using my custom markdown/styling combination.
# STEPS
// How the task will be approached
// Slow down and think
- Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.
// Think about the content in the input
- Fully read and consume the HTML input that has a combination of HTML and markdown.
// Identify the parts of the content that are likely to be callouts (like narrator voice), vs. blockquotes, vs regular text, etc. Get this from the text itself.
- Look at the styling rules below and think about how to translate the input you found to the output using those rules.
# OUTPUT RULES
Our new markdown / styling uses the following tags for styling:
<callout></callout> for wrapping a callout
<blockquote><cite></cite></blockquote> for marking a block quote (note the embedded citation in there where applicable)
# OUTPUT INSTRUCTIONS
// What the output should look like:
- The output should perfectly preserve the input, only it should look way better once rendered to HTML because it'll be following the new styling.
- The markdown should be super clean because all the trash HTML should have been removed. Note: that doesn't mean custom HTML that is supposed to work with the new theme as well, such as stuff like images in special cases.
- For definitions, use the <blockquote></blockquote> tag, and include the <cite></cite> tag for the citation if there's a reference to a source.
# INPUT
INPUT:
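A usage sketch, assuming the pattern is installed as sanitize_broken_html_to_markdown and the scraped page is saved as messy.html (a placeholder name):
cat messy.html | fabric --pattern sanitize_broken_html_to_markdown > clean.md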


@@ -41,365 +41,428 @@ For creating custom patterns: `fabric --pattern create_pattern`
# PATTERNS
## agility_story
Generates user stories and acceptance criteria for specified topics, focusing on Agile framework principles. This prompt specializes in translating topics into structured Agile documentation, specifically for user story and acceptance criteria creation. The expected output is a JSON-formatted document detailing the topic, user story, and acceptance criteria.
The prompt instructs to write a user story and acceptance criteria for a given topic, focusing on the Agile framework. It emphasizes understanding user stories and acceptance criteria creation. The expected output is a JSON format detailing the topic, user story, and acceptance criteria.
## ai
Summarizes and responds to questions with insightful bullet points. It involves creating a mental model of the question for deeper understanding. The output consists of 3-5 concise bullet points, each with a 10-word limit.
Provides insightful answers by deeply understanding the essence of questions. It involves creating a mental model of the question before responding. The output consists of 3-5 concise Markdown bullets, each with 10 words.
## analyze_answers
Evaluates the correctness of answers provided by learners to questions generated by a complementary quiz creation pattern. It aims to assess understanding of learning objectives and identify areas needing further study. The expected output is an analysis of the learner's answers, indicating their grasp of the subject matter.
Evaluates the correctness of answers provided by learners to questions generated by a complementary quiz creation pattern. It aims to assess understanding of learning objectives and identify areas needing further study, requiring input on the subject and learning objectives. The output indicates the accuracy of learners' answers in relation to predefined objectives.
## analyze_claims
Analyzes and rates the truth claims in input, providing evidence for and against, along with a balanced view. It separates truth claims from arguments, offering a nuanced analysis with ratings and labels for each claim. The output includes a summary, evidence, refutations, logical fallacies, ratings, labels, and an overall score and analysis.
Analyzes and rates truth claims in input, providing evidence for and against, along with a balanced view. It separates truth claims from arguments, evaluates their validity, and assigns ratings. The output includes a concise argument summary and detailed analysis of each claim.
## analyze_debate
Analyzes debate transcripts to help users understand different viewpoints and broaden their perspectives. It maps out claims, analyzes them neutrally, and rates the debate's insightfulness and emotionality. The output includes scores, participant emotionality, argument summaries with sources, and lists of agreements, disagreements, misunderstandings, learnings, and takeaways.
Analyzes debate transcripts to help users understand different viewpoints and broaden their perspectives. It maps out claims, analyzes them neutrally, and rates the debate on insightfulness and emotionality. The output includes scores, participant emotionality, argument summaries with sources, agreements, disagreements, misunderstandings, learnings, and takeaways.
## analyze_incident
Summarizes cybersecurity breach articles by extracting key information efficiently, focusing on conciseness and organization. It avoids inferential conclusions, relying solely on the article's content for details like attack date, type, and impact. The output is a structured summary with specific details about the cybersecurity incident, including attack methods, vulnerabilities, and recommendations for prevention.
Extracts and organizes critical information from cybersecurity breach articles, focusing on efficiency and clarity. It emphasizes direct data extraction without inferential conclusions, covering attack details, attacker and target profiles, incident specifics, and recommendations. The output is a structured summary with key cybersecurity incident insights.
## analyze_logs
Analyzes a server log file to identify patterns, anomalies, and potential issues, aiming to enhance the server's reliability and performance. The process involves a detailed examination of log entries, assessment of operational reliability, and identification of recurring issues. Recommendations for improvements are provided based on data-driven analysis, excluding personal opinions and irrelevant information.
## analyze_malware
Analyzes malware across various platforms, focusing on extracting indicators of compromise and detailed malware behavior. This approach includes analyzing telemetry and community data to aid in malware detection and analysis. The expected output includes a summary of findings, potential indicators of compromise, MITRE ATT&CK techniques, pivoting advice, detection strategies, suggested YARA rules, additional references, and technical recommendations.
## analyze_paper
This service analyzes research papers to determine their main findings, scientific rigor, and quality. It uniquely maps out claims, evaluates study design, and assesses conflicts of interest. The output includes a summary, author details, findings, study quality, and a final grade with explanations.
## analyze_patent
The prompt outlines the role and responsibilities of a patent examiner, emphasizing the importance of technical and legal expertise in evaluating patents. It details the steps for examining a patent, including identifying the technology field, problem addressed, solution, advantages, novelty, and inventive step, and summarizing the core idea and keywords. The expected output involves detailed analysis and documentation in specific sections without concern for length, using bullet points for clarity.
## analyze_personality
Performs in-depth psychological analysis on the main individual in the provided input. It involves identifying the primary person, deeply contemplating their language and responses, and comparing these to known human psychology principles. The output includes a concise psychological profile summary and detailed supporting points.
## analyze_presentation
Analyzes and critiques presentations, focusing on content, speaker's psychology, and the disparity between stated and actual goals. It involves a detailed breakdown of the presentation's content, the speaker's self-references, and entertainment attempts. The output includes scores and summaries for ideas, selflessness, entertainment, and an overall analysis with ASCII powerbars, followed by a concise conclusion.
## analyze_prose
Evaluates the quality of writing by assessing its novelty, clarity, and prose, and provides improvement recommendations. It uses a detailed approach to rate each aspect on a specific scale and ensures the overall rating reflects the lowest individual score. The expected output includes ratings and concise improvement tips.
## analyze_prose_json
Evaluates the quality of writing and content by assessing novelty, clarity, and prose, then provides ratings and recommendations for improvement. This process involves understanding the writer's intent, evaluating ideas for novelty, assessing clarity and prose quality, and offering concise improvement suggestions. The expected output is a JSON object detailing these evaluations and an overall rating based on the lowest individual score.
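A minimal sketch of that scoring rule, assuming per-aspect ratings have already been parsed out of the JSON (the aspect names and values here are illustrative):

```python
# Assumed per-aspect ratings parsed from the pattern's JSON output;
# the aspect names and values are illustrative.
scores = {"novelty": 7, "clarity": 5, "prose": 8}

# Per the description, the overall rating is the lowest individual
# score rather than an average, so one weak aspect caps the result.
overall = min(scores.values())
print(f"Overall rating: {overall}")  # -> 5, limited by clarity
```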
## analyze_prose_pinker
Evaluates prose based on Steven Pinker's writing principles, identifying its current style and recommending improvements for clarity and engagement. It involves analyzing the text's adherence to Pinker's stylistic categories and avoiding common pitfalls in writing. The output includes a detailed analysis of the prose's style, strengths, weaknesses, and specific examples of both effective and ineffective writing elements.
## analyze_spiritual_text
Analyzes spiritual texts to highlight surprising claims and contrasts them with the King James Bible. This approach involves detailed comparison, providing examples from both texts to illustrate differences. The output consists of concise bullet points summarizing these findings.
## analyze_tech_impact
Analyzes the societal impact of technology projects by breaking down their intentions, outcomes, and broader implications, including ethical considerations. It employs a structured approach, detailing the project's objectives, technologies used, target audience, outcomes, societal impact, ethical considerations, and sustainability. The expected output includes summaries, lists, and analyses across specified sections.
## analyze_threat_report
The prompt instructs a super-intelligent cybersecurity expert to analyze and extract key insights from cybersecurity threat reports. It emphasizes identifying new, interesting, and surprising information, and organizing these findings into concise, categorized summaries. The expected output includes a one-sentence summary, trends, statistics, quotes, references, and recommendations from the report, all formatted in plain language and without repetition.
## analyze_threat_report_trends
Analyzes cybersecurity threat reports to identify up to 50 unique, surprising, and insightful trends. This process involves a deep, expert analysis to uncover new and interesting information. The expected output is a list of trends without repetition or formatting embellishments.
## answer_interview_question
Generates tailored responses to technical interview questions, aiming for a casual yet insightful tone. The AI draws from a technical knowledge base and professional experiences to construct responses that demonstrate depth and alternative perspectives. Outputs are structured first-person responses, including context, main explanation, alternative approach, and evidence-based conclusion.
## ask_secure_by_design_questions
Generates a comprehensive set of security-focused questions tailored to the fundamental design of a specific project. This process involves deep analysis and conceptualization of the project's components and their security needs. The output includes a summary and a detailed list of security questions organized by themes.
## capture_thinkers_work
Summarizes teachings and philosophies of notable individuals or philosophical schools, providing detailed templates on their backgrounds, ideas, and applications. It offers a structured approach to encapsulating complex thoughts into accessible summaries. The output includes encapsulations, background information, schools of thought, impactful ideas, primary teachings, works, quotes, applications, and life advice.
## check_agreement
The prompt outlines a process for analyzing contracts and agreements to identify potential issues or "gotchas." It involves summarizing the document, listing important aspects, categorizing issues by severity, and drafting responses for critical and important items. The expected output includes a concise summary, detailed callouts, categorized issues, and recommended responses in Markdown format.
## clean_text
Summarizes and corrects formatting issues in text, focusing on removing odd line breaks and improving punctuation without altering content. This prompt emphasizes maintaining the original message while enhancing readability. The expected output is a cleaned, well-formatted version of the input text.
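A minimal sketch of the core cleanup step, collapsing line breaks that fall mid-paragraph while keeping blank-line paragraph boundaries; this is a heuristic approximation, not the pattern's exact logic:

```python
import re

def clean_text(text: str) -> str:
    # Blank lines mark paragraph boundaries, so split on them first.
    paragraphs = re.split(r"\n\s*\n", text)
    # Within each paragraph, odd single line breaks (e.g. from
    # hard-wrapped source text) become plain spaces.
    cleaned = [re.sub(r"\s*\n\s*", " ", p).strip() for p in paragraphs]
    return "\n\n".join(cleaned)

print(clean_text("This sentence was\nwrapped oddly.\n\nNext paragraph."))
```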
## coding_master
Explains coding concepts or languages to beginners, using examples from reputable sources and illustrating points with formatted code. The approach emphasizes clarity and accessibility, incorporating examples from Codecademy and NetworkChuck. Outputs include markdown-formatted code and structured lists of ideas, recommendations, habits, facts, and insights, adhering to specific word counts.
## compare_and_contrast
Compares and contrasts a list of items, focusing on their differences and similarities. The approach involves organizing the comparison into a markdown table format, with items on the left and topics at the top. The expected output is a structured table highlighting key comparisons.
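A small sketch of that layout, with items as rows and comparison topics as columns; the items and topics are placeholders:

```python
# Placeholder topics and items; rows are items, columns are topics,
# matching the described markdown table layout.
topics = ["Cost", "Speed", "Ecosystem"]
items = {
    "Tool A": ["low", "fast", "small"],
    "Tool B": ["high", "slower", "large"],
}

header = "| Item | " + " | ".join(topics) + " |"
divider = "|" + "---|" * (len(topics) + 1)
rows = [f"| {name} | " + " | ".join(vals) + " |" for name, vals in items.items()]
print("\n".join([header, divider, *rows]))
```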
## create_5_sentence_summary
Generates concise summaries or answers at five decreasing levels of depth. It involves deep understanding and thoughtful analysis of the input. The output is a structured list capturing the essence in 5, 4, 3, 2, and 1 word(s).
## create_academic_paper
Produces high-quality, authoritative LaTeX academic papers with clear concept explanations. It focuses on logical layout and simplicity while maintaining a professional appearance. The expected output is LaTeX code formatted in a two-column layout with a header and footer.
## create_ai_jobs_analysis
Analyzes job reports to identify roles least and most vulnerable to automation, offering strategies for enhancing job security. It leverages historical insights to predict automation's impact on various job categories. The output includes a detailed analysis and recommendations for resilience against automation.
## create_aphorisms
Generates a list of 20 aphorisms related to the given topic(s), ensuring variety in their beginnings. It focuses on sourcing quotes from real individuals. The output includes each aphorism followed by the name of the person who said it.
## create_art_prompt
The prompt guides an expert artist in conceptualizing and instructing AI to create art that perfectly encapsulates a given concept. It emphasizes deep thought on the concept and its visual representation, aiming for compelling and interesting artwork. The expected output is a 100-word description that not only instructs the AI on what to create but also how the art should evoke feelings and suggest style through examples.
## create_better_frame
The essay explores the concept of framing as a way to construct and interpret reality through different lenses, emphasizing the power of perspective in shaping one's experience of the world. It highlights various dichotomies in perceptions around topics like AI, race/gender, success, personal identity, and control over life, illustrating how different frames can lead to vastly different outlooks and outcomes. The author argues for the importance of choosing positive frames to improve individual and collective realities, suggesting that changing frames can change outcomes and foster more positive social dynamics.
## create_coding_project
Generates wireframes and starter code for coding projects based on user ideas. It specifically caters to transforming ideas into actionable project outlines and code skeletons, including detailed steps and file structures. The output includes project summaries, structured directories, and initial code setups.
## create_command
Generates specific command lines for various penetration testing tools based on a brief description of the desired outcome. This approach leverages the tool's help documentation to ensure accuracy and relevance. The expected output is a precise command that aligns with the user's objectives for the tool.
## create_cyber_summary
The prompt instructs on creating a comprehensive summary of cybersecurity threats, vulnerabilities, incidents, and malware for a technical audience. It emphasizes deep understanding through repetitive analysis and visualization techniques. The expected output includes a concise summary and categorized lists of cybersecurity issues.
## create_git_diff_commit
Provides instructions for using specific Git commands to manage code changes. It explains how to view differences since the last commit and display the latest commit details. The expected output includes command usage examples.
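The two commands in question can be reproduced directly; a sketch using Python's subprocess module:

```python
import subprocess

# Show uncommitted differences relative to the last commit.
diff = subprocess.run(["git", "diff", "HEAD"], capture_output=True, text=True)
print(diff.stdout)

# Display details of the latest commit.
last = subprocess.run(["git", "log", "-1"], capture_output=True, text=True)
print(last.stdout)
```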
## create_graph_from_input
Creates progress-over-time graphs for a security program, focusing on improvement metrics. It involves analyzing data to identify trends and outputting a CSV file with specific fields. The expected output is a CSV file detailing the program's progress over time.
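Since the exact CSV fields aren't spelled out here, this sketch assumes a simple date/metric/value layout; the column names and sample rows are invented:

```python
import csv

# Hypothetical progress rows for a security program; the columns
# (date, metric, value) are assumptions, since the pattern defines
# its own specific fields.
rows = [
    {"date": "2025-01-01", "metric": "patched_hosts_pct", "value": 62},
    {"date": "2025-02-01", "metric": "patched_hosts_pct", "value": 71},
]

with open("progress.csv", "w", newline="") as f:
    writer = csv.DictWriter(f, fieldnames=["date", "metric", "value"])
    writer.writeheader()
    writer.writerows(rows)
```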
## create_hormozi_offer
The AI is designed to create business offers based on Alex Hormozi's "$100M Offers" strategies, aiming to craft irresistible deals. It integrates Hormozi's principles, focusing on value, pricing, guarantees, and market targeting. The expected output includes a detailed analysis of potential business offers, highlighting their unique value propositions.
## create_idea_compass
Guides users in developing a structured exploration of ideas through a detailed template. It emphasizes clarity and organization by breaking down the process into specific steps, including defining, supporting, and contextualizing the idea. The expected output is a comprehensive summary with related ideas, evidence, and sources organized in a structured format.
## create_investigation_visualization
Creates detailed GraphViz visualizations to illustrate complex intelligence investigations and data insights. This approach involves extensive analysis, organizing information, and visual representation using shapes, colors, and labels for clarity. The output includes a comprehensive diagram and analytical conclusions with a certainty rating.
## create_keynote
The prompt guides in creating TED-quality keynote presentations from provided input, focusing on narrative flow and practical takeaways. It outlines steps for structuring the presentation into slides with concise bullet points, images, and speaker notes. The expected output includes a story flow, the final takeaway, and a detailed slide deck.
## create_logo
Generates simple, minimalist company logos based on provided input, focusing on elegance and impact without text. The approach emphasizes super minimalist designs. The output is a prompt for an AI image generator to create a simple, vector graphic logo.
## create_markmap_visualization
Transforms complex ideas into visual formats using MarkMap syntax for easy understanding. This process involves simplifying concepts to ensure they can be effectively represented within the constraints of MarkMap. The output is a MarkMap syntax diagram that visually communicates the core ideas.
## create_mermaid_visualization
Transforms complex ideas into simplified Mermaid (Markdown) visual diagrams. This process involves creating detailed visualizations that can independently explain concepts using Mermaid syntax, focusing on clarity and comprehensibility. The expected output is a Mermaid syntax diagram accompanied by a concise visual explanation.
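For a flavor of the target format, a minimal Mermaid flowchart can be assembled as plain text; this trivial composer is illustrative and not part of the pattern itself:

```python
# Compose a tiny Mermaid flowchart as plain text; paste the output
# into any Mermaid-aware Markdown renderer to view it.
edges = [("Input", "Analyze"), ("Analyze", "Simplify"), ("Simplify", "Diagram")]
lines = ["graph TD"] + [f"    {src} --> {dst}" for src, dst in edges]
print("\n".join(lines))
```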
## create_micro_summary
Summarizes content into a Markdown formatted summary, focusing on brevity and clarity. It emphasizes creating concise, impactful points and takeaways. The output includes a one-sentence summary, main points, and key takeaways, each adhering to strict word limits.
## create_network_threat_landscape
Analyzes open ports and services from network scans to identify security risks and provide recommendations. This process involves a detailed examination of port and service statistics to uncover potential vulnerabilities. The expected output is a markdown formatted threat report with sections on description, risk, recommendations, a concise summary, trends, and quotes from the analysis.
## create_npc
Generates detailed NPCs for D&D 5th edition, incorporating a wide range of characteristics from background to appearance. It emphasizes creativity in developing a character's backstory, traits, and goals. The output is a comprehensive character profile suitable for gameplay.
## create_pattern
The AI assistant is designed to interpret and respond to LLM/AI prompts with structured outputs. It specializes in organizing and analyzing prompts to produce responses that adhere to specific instructions and formatting requirements. The assistant ensures accuracy and alignment with the intended outcomes through meticulous analysis.
## create_quiz
Generates questions for reviewing learning objectives based on provided subject and objectives. It requires defining the subject and learning objectives for accurate question generation. The output consists of questions aimed at helping students review key concepts.
## create_reading_plan
Designs a tailored three-phase reading plan based on user input, focusing on an author or specific guidance. It carefully selects books from various sources, including hidden gems, to enhance the user's knowledge on the topic. The output includes a concise plan summary and categorized reading lists with reasons for each selection.
## create_report_finding
The prompt instructs the creation of a detailed markdown security finding report, incorporating sections like Description, Risk, Recommendations, and others, based on a vulnerability title and explanation provided by the user. It emphasizes a structured, insightful approach to documenting cybersecurity vulnerabilities. The expected output is a comprehensive report with specific sections, focusing on clarity, insightfulness, and relevance to cybersecurity assessment.
## create_security_update
The prompt instructs on creating concise security updates for newsletters, focusing on cybersecurity developments, threats, advisories, and new vulnerabilities. It emphasizes brevity and relevance, requiring links to further information. The expected output includes structured sections with short descriptions and relevant details, aiming to inform readers about the latest security concerns efficiently.
## create_show_intro
Creates compelling short intros for podcasts, focusing on the most interesting aspects of the show. It involves listening to the entire show, identifying key topics, and highlighting them in a concise introduction. The output is a structured intro that teases the conversation's main points.
## create_sigma_rules
Extracts Tactics, Techniques, and Procedures (TTPs) from security news publications to create YAML-based Sigma rules for host-based detection. These rules focus on detecting cybersecurity threats using telemetry sources such as Sysinternals Sysmon, PowerShell, and Windows event logs. The output includes well-documented Sigma rules in YAML format, each separated by headers and footers.
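For orientation, here is a skeletal Sigma rule emitted as YAML with PyYAML (a third-party dependency); the title and detection values are invented for illustration, not a vetted detection:

```python
import yaml  # PyYAML

# Skeletal Sigma rule; the title and detection logic are made up
# for illustration and are not production-ready.
rule = {
    "title": "Suspicious Tool Execution (example)",
    "status": "experimental",
    "logsource": {"category": "process_creation", "product": "windows"},
    "detection": {
        "selection": {"Image|endswith": "\\suspicious_tool.exe"},
        "condition": "selection",
    },
    "level": "medium",
}

print(yaml.safe_dump(rule, sort_keys=False))
```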
## create_stride_threat_model
The prompt instructs on creating a detailed threat model using the STRIDE per element methodology for a given system design document. It emphasizes understanding the system's assets, trust boundaries, and data flows to identify and prioritize potential threats. The expected output is a comprehensive table listing threats, their components, mitigation strategies, and risk assessments.
## create_summary
Summarizes content into a structured Markdown format, focusing on brevity and clarity. It emphasizes creating a concise summary, listing main points, and identifying key takeaways. The output is organized into specific sections for easy reference.
## create_tags
The prompt instructs the AI to identify and output tags from text content for use in mind mapping tools, extracting at least five subjects or ideas. It emphasizes including any authors or existing tags, converting spaces in tags to underscores, and ensuring all tags are lowercase without repetition. The expected output is a single line of space-separated, lowercase tags relevant to the text's content.
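The formatting rules described (lowercase, underscores for spaces, no repeats, one space-separated line) are simple enough to sketch directly:

```python
def format_tags(raw_tags: list[str]) -> str:
    seen: set[str] = set()
    out: list[str] = []
    for tag in raw_tags:
        t = tag.strip().lower().replace(" ", "_")
        if t and t not in seen:  # drop empties and repeats
            seen.add(t)
            out.append(t)
    return " ".join(out)

print(format_tags(["Mind Mapping", "AI", "ai", "Note Taking"]))
# -> mind_mapping ai note_taking
```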
## create_threat_model
The prompt outlines a comprehensive approach to everyday threat modeling, emphasizing its application beyond technical defenses to include personal and physical security scenarios. It distinguishes between realistic and possible threats, advocating for a balanced approach to risk management that considers the value of what's being protected, the likelihood of threats, and the cost of controls. The expected output involves creating threat models for various scenarios, highlighting realistic defenses, and guiding individuals towards logical security decisions through structured analysis.
## create_threat_scenarios
The prompt seeks to identify and prioritize potential threats to a given system or situation, using a narrative-based, simple threat modeling approach. It emphasizes distinguishing between realistic and possible threats, focusing on those worth defending against. The expected output includes a list of prioritized threat scenarios, an analysis of the threat model, recommended controls, a narrative analysis, and a concise conclusion.
## create_upgrade_pack
Extracts and organizes insights on world models and task algorithms from provided content. It focuses on identifying and categorizing beliefs about the world and optimal task execution strategies. The output includes concise, actionable bullet points under relevant categories.
## create_video_chapters
Extracts and organizes the most engaging topics from a transcript with corresponding timestamps. This process involves a detailed review of the transcript to identify key moments and subjects. The output is a list of topics with their timestamps in a sequential format.
## create_visualization
Transforms complex ideas into simplified ASCII art visualizations. This approach focuses on distilling intricate concepts into visual forms that can be easily understood through ASCII art. The expected output is a detailed ASCII art representation accompanied by a concise visual explanation.
## explain_code
Analyzes and explains code, security tool outputs, or configuration texts, tailoring the explanation to the type of input. It uses specific sections to clarify the function, implications, or settings based on the input's nature. The expected output is a detailed explanation or answer in designated sections.
## explain_docs
The prompt instructs on transforming input about tool usage into improved, structured documentation. It emphasizes clarity and utility, breaking down the process into specific sections for a comprehensive guide. The expected output includes an overview, usage syntax, common use cases, and key features of the tool.
## explain_project
Summarizes project documentation into a concise, user and developer-focused summary, highlighting its purpose, problem addressed, approach, installation, usage, and examples. It simplifies complex information for easy understanding and application. The output includes a project overview, problem it addresses, approach to solving the problem, and practical steps for installation and usage.
## explain_terms
Produces a glossary of advanced terms found in specific content, including definitions and analogies. It focuses on explaining obscure or complex terms to aid understanding. The output is a list of terms with explanations and analogies in a structured Markdown format.
## export_data_as_csv
The prompt instructs the AI to identify and format data structures from the input into a CSV file. It emphasizes understanding the context and accurately naming fields based on the input. The expected output is a CSV file containing all identified data structures.
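A sketch of the "name fields based on the input" idea: collect the union of keys across the identified records in first-seen order, then emit one CSV; the records here are placeholders:

```python
import csv
import sys

# Placeholder records standing in for data structures found in the input.
records = [
    {"name": "alpha", "count": 3},
    {"name": "beta", "count": 5, "note": "late addition"},
]

# Derive field names from the records themselves, preserving
# first-seen order; missing values are left blank in the CSV.
fields: list[str] = []
for rec in records:
    for key in rec:
        if key not in fields:
            fields.append(key)

writer = csv.DictWriter(sys.stdout, fieldnames=fields)
writer.writeheader()
writer.writerows(records)
```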
## extract_algorithm_update_recommendations
Analyzes input to provide concise, actionable recommendations for improving processes within content. It focuses on extracting practical steps to enhance algorithms or methodologies. The output consists of a bulleted list of up to three brief suggestions.
## extract_article_wisdom
Extracts key insights and valuable information from textual content, focusing on ideas, quotes, habits, and references. It aims to address the issue of information overload by providing a concise summary of the content's most meaningful aspects. The expected output includes summarized ideas, notable quotes, referenced materials, and habits worth adopting.
## extract_book_ideas
Summarizes a book's key content by extracting 50 to 100 of its most interesting ideas. The process involves a deep dive into the book's insights, prioritizing them by interest and insightfulness. The output is a concise list of bulleted ideas, limited to 20 words each.
## extract_book_recommendations
Summarizes a book's key content by extracting 50 to 100 of its most practical recommendations, prioritizing the most impactful advice. This process involves a thorough memory search to identify actionable insights. The output is formatted as an instructive, bullet-pointed list, limited to 20 words each.
## extract_business_ideas
The prompt outlines a process for identifying and elaborating on innovative business ideas. It focuses on extracting top business concepts from provided content and then refining the best ten by exploring adjacent possibilities. The expected output includes two sections: a list of extracted ideas and a detailed elaboration on the top ten ideas, ensuring uniqueness and differentiation.
## extract_controversial_ideas
Identifies and lists controversial statements from inputs. This AI system focuses on extracting contentious ideas and quotes, presenting them in a structured Markdown format. The expected output includes sections for controversial ideas and supporting quotes, each with specific content guidelines.
## extract_extraordinary_claims
Identifies and lists extraordinary claims from conversations, focusing on those rejected by the scientific community or based on misinformation. The process involves deep analysis to pinpoint statements that defy accepted scientific truths, such as denying evolution or the moon landing. The output is a detailed list of quotes, ranging from 50 to 100, showcasing these claims.
## extract_ideas
Extracts and condenses insightful ideas from text into 15-word bullet points focusing on life's purpose and human progress. This process emphasizes capturing unique insights on specified themes. The output consists of a list of concise, thought-provoking ideas.
## extract_insights
Extracts and condenses complex insights from text on profound topics into 15-word bullet points. This process emphasizes the extraction of nuanced, powerful ideas related to human and technological advancement. The expected output is a concise list of abstracted, insightful bullets.
## extract_main_idea
Extracts and highlights the most crucial or intriguing idea from any given content. This prompt emphasizes a methodical approach to identify and articulate the essence of the input. The expected output includes a concise main idea and a recommendation based on that idea.
## extract_patterns
The prompt guides in identifying and analyzing recurring, surprising, or insightful patterns from a collection of ideas, data, or observations. It emphasizes extracting the most notable patterns based on their frequency and significance, and then documenting the process of discovery and analysis. The expected output includes a detailed summary of patterns, an explanation of their selection and significance, and actionable advice for startup builders based on these insights.
## extract_poc
Analyzes security or bug bounty reports to extract and provide proof of concept URLs for validating vulnerabilities. It specializes in identifying actionable URLs and commands from the reports, ensuring direct verification of reported vulnerabilities. The output includes the URL with a specific command to execute it, like using curl or python.
## extract_predictions
Extracts and organizes predictions from content into a structured format. It focuses on identifying specific predictions, their timelines, confidence levels, and verification methods. The expected output includes a bulleted list and a detailed table of these predictions.
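A sketch of the described table, rendered from already-extracted prediction records; the records and column names are placeholders, not the pattern's fixed schema:

```python
# Placeholder prediction records: (prediction, date, confidence,
# verification method); the values are invented for illustration.
predictions = [
    ("AI assistants ship in most IDEs", "2026", "high", "IDE market surveys"),
    ("Passwordless login is the default", "2028", "medium", "vendor adoption reports"),
]

print("| Prediction | Date | Confidence | Verification |")
print("|---|---|---|---|")
for pred, date, conf, how in predictions:
    print(f"| {pred} | {date} | {conf} | {how} |")
```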
## extract_questions
Extracts questions from content and analyzes their effectiveness in eliciting high-quality responses. It focuses on identifying the elements that make these questions particularly insightful. The expected output includes a list of questions, an analysis of their strengths, and recommendations for interviewers.
## extract_recommendations
Extracts and condenses recommendations from content into a concise list. This process involves identifying both explicit and implicit advice within the given material. The output is a bulleted list of up to 20 brief recommendations.
## extract_references
Extracts references to various forms of art and literature from content, compiling them into a concise list. This process involves identifying and listing up to 20 references, ensuring each is succinctly described in no more than 15 words. The output is a bulleted list of references to art, stories, books, literature, papers, and other sources of learning.
## extract_song_meaning
Analyzes and interprets the meaning of songs based on extensive research and lyric examination. This process involves deep analysis of the artist's background, song context, and lyrics to deduce the song's essence. Outputs include a summary sentence, detailed meaning in bullet points, and evidence supporting the interpretation.
## extract_sponsors
Identifies and distinguishes between official and potential sponsors from transcripts. This process involves analyzing content to separate actual sponsors from merely mentioned companies. The output lists official sponsors and potential sponsors based on their mention in the content.
## extract_videoid
Extracts video IDs from URLs for use in other applications. It meticulously analyzes the URL to locate the specific part that contains the video ID. The output is solely the video ID, with no additional information or formatting.
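For YouTube-style URLs specifically, the extraction can be approximated with the standard library; the URL shapes handled here are an assumption, since the pattern itself is not tied to one site:

```python
from urllib.parse import urlparse, parse_qs

def extract_video_id(url: str) -> str | None:
    parsed = urlparse(url)
    # watch?v=<id> URLs carry the ID in the query string.
    if "youtube.com" in parsed.netloc:
        return parse_qs(parsed.query).get("v", [None])[0]
    # youtu.be short links carry the ID in the path.
    if "youtu.be" in parsed.netloc:
        return parsed.path.lstrip("/") or None
    return None

print(extract_video_id("https://www.youtube.com/watch?v=dQw4w9WgXcQ"))
```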
## extract_wisdom
Extracts key insights, ideas, quotes, habits, and references from textual content to address the issue of information overload and the challenge of retaining knowledge. It uniquely filters and condenses valuable information from various texts, making it easier for users to decide if the content warrants a deeper review or to use as a note-taking alternative. The output includes summarized ideas, notable quotes, relevant habits, and useful references, all aimed at enhancing understanding and retention.
## extract_wisdom_agents
This prompt outlines a complex process for extracting insights from text content, focusing on themes like the meaning of life and technology's impact on humanity. It involves creating teams of AI agents with diverse expertise to analyze the content and produce summaries, ideas, insights, quotes, habits, facts, references, and recommendations. The expected output includes structured sections filled with concise, insightful entries derived from the input material.
## extract_wisdom_dm
Extracts and synthesizes valuable content from input text, focusing on insights related to life's purpose and human advancement. It employs a structured approach to distill surprising ideas, insights, quotes, habits, facts, and recommendations from the content. The output includes summaries, ideas, insights, and other categorized information for deep understanding and practical application.
The prompt outlines a comprehensive process for extracting and organizing valuable content from input text, focusing on insights related to life's purpose, human flourishing, and technology's impact. It emphasizes a detailed, step-by-step approach to identify ideas, insights, quotes, habits, facts, references, and recommendations from the content. The expected output includes summaries, lists of ideas, insights, quotes, habits, facts, references, and a one-sentence takeaway, all formatted in Markdown and adhering to specific word counts and item quantities.
## extract_wisdom_large
The purpose is to extract and distill key insights, ideas, habits, facts, and recommendations from a detailed conversation about writing, communication, and the iterative process of creating content. The nuanced approach involves identifying the essence of effective communication, the importance of authenticity in writing, and the value of distillation in conveying ideas. The expected output includes categorized summaries of ideas, insights, habits, facts, recommendations, and more, all aimed at enhancing understanding and application of the discussed principles in writing and communication.
## extract_wisdom_nometa
This prompt guides the extraction and organization of insightful content from text, focusing on life's purpose, human flourishing, and technology's impact. It emphasizes identifying and summarizing surprising ideas, refined insights, practical habits, notable quotes, valid facts, and useful recommendations related to these themes. The expected output includes structured sections for summaries, ideas, insights, quotes, habits, facts, recommendations, and references, each with specific content and formatting requirements.
The prompt instructs on extracting and organizing various insights, ideas, quotes, habits, facts, recommendations, and references from text content focused on life's purpose, human flourishing, and the impact of technology and AI. It emphasizes the discovery of surprising and insightful information within these themes. The output is structured into sections for summary, ideas, insights, quotes, habits, facts, references, and recommendations, with specific instructions on the length and format for each entry.
## find_hidden_message
Analyzes political messages to reveal overt and hidden intentions. It employs knowledge of politics, propaganda, and psychology to dissect content, focusing on recent political debates. The output includes overt messages, hidden cynical messages, supporting arguments, desired audience actions, and analyses from cynical to favorable.
The prompt instructs the AI to analyze and interpret political messages in content, distinguishing between overt and hidden messages. It emphasizes a cynical evaluation, focusing on underlying political intentions and expected actions from the audience. The output includes structured analysis and summaries of both overt and hidden messages, supported by arguments and desired audience actions, concluding with various levels of analysis from cynical to favorable.
## find_logical_fallacies
Identifies and categorizes various fallacies in arguments or texts. This prompt focuses on recognizing invalid or faulty reasoning across a wide range of fallacies, from formal to informal types. The expected output is a list of identified fallacies with brief explanations.
The prompt instructs the AI to identify various types of fallacies from a given text, using a comprehensive list of fallacies as a reference. It emphasizes the importance of recognizing invalid or faulty reasoning in arguments. The expected output is a list of identified fallacies, each described concisely within a 15-word explanation, formatted under a "FALLACIES" section in Markdown.
## get_wow_per_minute
Evaluates the density of wow-factor in content by analyzing its surprise, novelty, insight, value, and wisdom. This process involves a detailed and varied consumption of the content to assess its potential to engage and enrich viewers. The expected output is a JSON report detailing scores and explanations for each wow-factor component and overall wow-factor per minute.
Evaluates the density of wow-factor in content, focusing on surprise, novelty, insight, value, and wisdom across various content types. It aims to quantify how rewarding content is based on these elements. The expected output is a JSON file detailing scores and explanations for each wow-factor component per minute.
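For orientation, a minimal Go sketch of what such a JSON report might look like follows; every field name here is an assumption chosen to mirror the components listed above, not the pattern's actual schema.

```go
// Assumed shape only: these structs mirror the wow-factor components named
// above, but the real pattern's field names may differ.
package main

import (
	"encoding/json"
	"fmt"
)

type WowComponent struct {
	Score       int    `json:"score"`       // e.g. 0-10 for this component
	Explanation string `json:"explanation"` // short justification
}

type WowReport struct {
	Surprise     WowComponent `json:"surprise"`
	Novelty      WowComponent `json:"novelty"`
	Insight      WowComponent `json:"insight"`
	Value        WowComponent `json:"value"`
	Wisdom       WowComponent `json:"wisdom"`
	WowPerMinute float64      `json:"wow_per_minute"` // overall density score
}

func main() {
	report := WowReport{
		Surprise:     WowComponent{Score: 7, Explanation: "unexpected framing of a familiar topic"},
		WowPerMinute: 6.5,
	}
	out, _ := json.MarshalIndent(report, "", "  ")
	fmt.Println(string(out))
}
```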
## get_youtube_rss
Generates RSS URLs for YouTube channels based on given channel IDs or URLs. It extracts the channel ID from the input and constructs the corresponding RSS URL. The output is solely the RSS URL.
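The construction itself is mechanical; a minimal Go sketch follows, using YouTube's public feed endpoint (`/feeds/videos.xml?channel_id=...`). The channel ID below is a placeholder, and extracting an ID from an arbitrary channel URL is left out for brevity.

```go
// Sketch of the URL construction only; YouTube serves a channel's RSS feed
// at /feeds/videos.xml?channel_id=<ID>.
package main

import "fmt"

func rssURL(channelID string) string {
	return fmt.Sprintf("https://www.youtube.com/feeds/videos.xml?channel_id=%s", channelID)
}

func main() {
	fmt.Println(rssURL("UCxxxxxxxxxxxxxxxxxxxxxx")) // placeholder channel ID
}
```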
## improve_academic_writing
This prompt aims to enhance the quality of text for academic purposes. It focuses on refining grammatical errors, improving clarity and coherence, and adopting an academic tone while ensuring ease of understanding. The expected output is a professionally refined text with a list of applied corrections.
This prompt aims to refine input text into academic and scientific language, ensuring clarity, coherence, and ease of understanding. It emphasizes formal English, avoiding repetition and trivial statements to maintain a professional tone. The expected output is a text improved for academic purposes.
## improve_prompt
This service enhances LLM/AI prompts by applying expert prompt writing techniques to achieve better results. It leverages strategies like clear instructions, persona adoption, and reference text provision to refine prompts. The output is an improved version of the original prompt, optimized for clarity and effectiveness.
Enhances LLM/AI prompt quality by applying expert writing techniques, focusing on clarity, specificity, and structured instructions. It leverages strategies like clear instructions, persona adoption, and reference text provision to improve model responses. The service outputs refined prompts designed for optimal interaction with LLMs.
## improve_report_finding
The prompt instructs the creation of an improved security finding report from a penetration test, detailing the finding, risk, recommendations, references, a concise summary, and insightful quotes, all formatted in markdown without using markdown syntax or special formatting. It emphasizes a detailed, insightful approach to presenting cybersecurity issues and solutions. The output should be comprehensive, covering various sections including title, description, risk, recommendations, references, and quotes, aiming for clarity and depth in reporting.
Improves a security finding from a penetration test report by providing a detailed and enhanced report in markdown format, focusing on description, risk, recommendations, references, and summarizing the finding concisely. It emphasizes clarity, insightfulness, and actionable advice while avoiding jargon and repetition. The output includes a title, detailed description, risk analysis, insightful recommendations, relevant references, a concise summary, and notable quotes, all formatted for easy readability and immediate application.
## improve_writing
This prompt aims to refine input text for enhanced clarity, coherence, grammar, and style. It involves analyzing the text for errors and inconsistencies, then applying corrections while preserving the original meaning. The expected output is a grammatically correct and stylistically improved version of the text.
This prompt aims to refine and enhance input text for better clarity, coherence, grammar, and style. It involves analyzing the text for errors and inconsistencies, then applying corrections while preserving the original meaning. The expected output is a grammatically correct and stylistically improved version of the input text.
## label_and_rate
Evaluates and categorizes content based on its relevance to specific human-centric themes, then assigns a tiered rating and a numerical quality score. It uses a predefined set of labels for categorization and assesses content based on idea quantity and thematic alignment. The expected output is a structured JSON object detailing the content summary, labels, rating, and quality score with explanations.
The prompt outlines a process for evaluating content based on its relevance to specific human-centric themes, assigning labels from a predefined list, and rating its quality and thematic alignment. It emphasizes the importance of content's focus on human flourishing and meaning, penalizing content that is politically charged or unrelated to the core themes. The expected output is a structured JSON object summarizing the content's essence, its applicable labels, a tiered rating, and a numerical quality score, along with explanations for these assessments.
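A hypothetical Go sketch of that JSON object's shape follows; the field names are illustrative assumptions that mirror the summary/labels/rating/score layout described above, not the pattern's actual output keys.

```go
// Hypothetical field names; this only illustrates the described layout of
// summary, labels, tiered rating, and numerical quality score.
package main

import (
	"encoding/json"
	"fmt"
)

type LabelAndRate struct {
	OneSentenceSummary string   `json:"one-sentence-summary"`
	Labels             []string `json:"labels"` // from the predefined label set
	Rating             string   `json:"rating"` // tiered rating, e.g. "B Tier"
	RatingExplanation  string   `json:"rating-explanation"`
	QualityScore       int      `json:"quality-score"`
	ScoreExplanation   string   `json:"quality-score-explanation"`
}

func main() {
	out := LabelAndRate{
		OneSentenceSummary: "An essay on finding meaning through deliberate practice.",
		Labels:             []string{"Meaning", "Habits"},
		Rating:             "B Tier",
		RatingExplanation:  "Solid ideas with moderate thematic alignment.",
		QualityScore:       82,
		ScoreExplanation:   "Dense with ideas, though few are novel.",
	}
	b, _ := json.MarshalIndent(out, "", "  ")
	fmt.Println(string(b))
}
```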
## official_pattern_template
The prompt outlines a complex process for diagnosing and addressing psychological issues based on a person's background and behaviors. It involves deep analysis of the individual's history, identifying potential mental health issues, and suggesting corrective actions. The expected output includes summaries of past events, possible psychological issues, their impact on behavior, and recommendations for improvement.
Analyzes a person's background and behaviors to diagnose psychological issues and recommend actions. It involves a detailed process of understanding the individual's history and current behavior to identify underlying problems. The output includes summaries of events, possible issues, behavior connections, and corrective recommendations.
## philocapsulate
Summarizes teachings of philosophers or philosophies, providing detailed templates on their background, encapsulated philosophy, school, teachings, works, quotes, application, and life advice. It differentiates between individual philosophers and philosophies with tailored templates for each. The output includes structured information for educational or analytical purposes.
The prompt instructs on creating detailed templates about philosophers or philosophies, including their background, teachings, and application. It specifies the structure for presenting information, such as encapsulating philosophies, listing works or teachings, and defining terms like "$philosopher-ian." The expected output is a comprehensive overview tailored to either an individual philosopher or a philosophy, highlighting key aspects and advice on living according to their teachings.
## provide_guidance
Provides comprehensive psychological advice tailored to the individual's specific question and context. This approach delves into the person's past, traumas, and life goals to offer targeted feedback and recommendations. The expected output includes a concise analysis, detailed scientific rationale, actionable recommendations, Esther Perel's perspective, self-reflection prompts, possible clinical diagnoses, and a summary, all aimed at fostering self-awareness and positive change.
Provides comprehensive psychological advice tailored to the individual's specific question and context. This approach combines elements of psychiatry, psychology, and life coaching, offering a structured analysis and actionable recommendations. The expected output includes a concise analysis, detailed scientific explanations, personalized recommendations, and self-reflection questions.
## rate_ai_response
Evaluates the quality of AI responses against the benchmark of human experts, assigning a letter grade and score. It involves deep analysis of both the instructions given to the AI and its output, comparing these to the potential performance of the world's best human expert. The process culminates in a detailed justification for the assigned grade, highlighting specific strengths and weaknesses of the AI's response.
Evaluates the quality of AI responses against the benchmark of the world's best human experts, focusing on understanding instructions, comparing AI output to optimal human performance, and rating the AI's work using a detailed grading system. The process involves deep analysis of both the instructions given to the AI and its response, followed by a structured evaluation that includes a letter grade, specific reasons for the grade, and a numerical score. The evaluation criteria emphasize comparison with human capabilities, ranging from expert to average performance.
## rate_ai_result
Evaluates the quality of AI-generated content based on construction, quality, and spirit. The process involves analyzing AI outputs against criteria set by experts and a high-IQ AI panel. The expected output is a final score out of 100, with deductions detailed for each category.
Evaluates the quality of AI-generated content based on construction, quality, and spirit. This process involves analyzing AI outputs against criteria set by experts and a high-IQ AI panel. The final output is a comprehensive score out of 100, reflecting the content's adherence to the prompt's requirements and essence.
## rate_content
The prompt outlines a process for evaluating content by labeling it with relevant single-word descriptors, rating its quality based on idea quantity and thematic alignment, and scoring it on a scale from 1 to 100. It emphasizes the importance of matching content with specific themes related to human meaning and the future of AI, among others. The expected output includes a list of labels, a tiered rating with an explanation, and an overall quality score with justification.
The prompt outlines a process for evaluating content by labeling it with relevant single-word descriptors and then rating its quality based on idea quantity and thematic alignment with specified themes. It emphasizes a nuanced approach to content assessment, combining quantitative and qualitative measures. The expected output includes a list of labels, a tiered rating with an explanation, and a numerical content score with justification.
## rate_value
This prompt seeks to acknowledge the collaborative effort behind its creation, inspired by notable figures in information theory and viral content creation. It highlights the fusion of theoretical foundations and modern digital strategies. The output is an attribution of credit.
The prompt aims to create content inspired by Claude Shannon's Information Theory and Mr. Beast's viral techniques. It leverages foundational communication theories and modern viral strategies for impactful content creation. The expected output is engaging and widely shareable content.
## raw_query
The prompt instructs the AI to produce the best possible output by thoroughly analyzing and understanding the input. It emphasizes deep contemplation of the input's meaning and the sender's intentions. The expected output is an optimal response tailored to the inferred desires of the input provider.
The prompt instructs the AI to produce the best possible output by thoroughly analyzing and understanding the input. It emphasizes deep contemplation of the input's meaning and the sender's intentions. The expected output is an optimal response tailored to the perceived desires of the prompt sender.
## recommend_artists
Recommends a personalized festival schedule featuring artists similar to the user's preferences in EDM genres and artists. The recommendation process involves analyzing the user's favorite styles and artists, then selecting similar artists and explaining the choices. The output is a detailed schedule organized by day, set time, stage, and artist, optimized for the user's enjoyment.
Recommends a personalized festival schedule featuring artists that match the user's preferred EDM styles and artists. The process involves analyzing the user's favorite styles and artists, then selecting similar artists and explaining the choices. The output is a day-by-day, set-time, and stage schedule optimized for the user's enjoyment.
## show_fabric_options_markmap
Creates a visual representation of the functionalities provided by the Fabric project, focusing on augmenting human capabilities with AI. The approach involves breaking down the project's capabilities into categories like summarization, analysis, and more, with specific patterns branching from these categories. The expected output is comprehensive Markmap code detailing this functionality map.
Summarizes the Fabric project, an open-source framework designed to integrate AI into daily challenges through customizable prompts called Patterns. It emphasizes ease of use and adaptability, offering tools for a wide range of tasks from content summarization to creating AI art. The expected output includes a visual Markmap representation of Fabric's capabilities.
## suggest
Analyzes user input to suggest appropriate fabric commands or patterns, enhancing the tool's functionality. It involves understanding specific needs, determining suitable commands, and providing clear, user-friendly suggestions. The output includes command suggestions, explanations, and instructions for new patterns.
## suggest_pattern
Develops a feature for the fabric command-line tool that suggests appropriate commands or patterns based on user input. It involves analyzing requests, determining suitable commands, and providing clear suggestions. The output includes explanations or multiple options, aiming to enhance user accessibility.
## summarize
Summarizes content into a structured Markdown format, focusing on brevity and clarity. It extracts and lists the most crucial points and takeaways. The output includes a one-sentence summary, main points, and key takeaways, adhering to specified word limits.
The prompt instructs on summarizing content into a structured Markdown format. It emphasizes creating concise, informative summaries with specific sections for a one-sentence summary, main points, and key takeaways. The expected output is a neatly organized summary with clear, distinct sections.
## summarize_debate
Analyzes debates to identify and summarize the primary disagreements, arguments, and evidence that could change participants' minds. It breaks down complex discussions into concise summaries and evaluates argument strength, predicting outcomes. The output includes structured summaries and analyses of each party's position and evidence.
The prompt outlines a process for analyzing debates, focusing on identifying disagreements, arguments, and evidence that could change participants' minds. It emphasizes a structured approach to summarizing debates, including extracting key points and evaluating argument strength. The expected output includes summaries of the content, arguments, and evidence, along with an analysis of argument strength and predictions about the debate's outcome.
## summarize_git_changes
Summarizes major changes and upgrades in a GitHub project over the past week. It involves identifying key updates, then crafting a concise, enthusiastic summary and detailed bullet points highlighting these changes. The output includes a 20-word introduction and enthusiastically written update bullets.
Summarizes major changes and upgrades in a GitHub project over the past week. The approach involves creating a concise section titled "CHANGES" with bullet points limited to 10 words each. The expected output includes a 20-word introductory sentence and bullet points detailing the updates enthusiastically.
## summarize_git_diff
Analyzes Git diffs to summarize major changes and upgrades. It emphasizes creating concise bullet points for feature changes and updates, tailored to the extent of modifications. The expected output includes a 100-character intro sentence using conventional commits format.
Analyzes Git diffs to identify and summarize key changes and upgrades. This prompt focuses on creating concise, bullet-point summaries for project updates, using conventional commit messages. The expected output includes a brief intro sentence followed by bullet points detailing the changes.
## summarize_lecture
Extracts and organizes key topics from a lecture transcript, providing structured summaries, definitions, and timestamps. This process involves a detailed review of the transcript to identify main subjects, create bullet points, and list definitions with corresponding video timestamps. The output includes a concise summary, a list of tools mentioned with descriptions, and a one-sentence takeaway, all formatted for easy readability.
## summarize_micro
Summarizes content into a structured Markdown format. This prompt focuses on concise, bullet-pointed summaries and takeaways. The output includes a one-sentence summary and lists of main points and takeaways.
The prompt instructs on summarizing content into a structured Markdown format. It emphasizes conciseness and clarity, focusing on a single sentence summary, main points, and key takeaways. The expected output is a well-organized, bullet-pointed list highlighting the essence of the content.
## summarize_newsletter
Extracts and organizes key content from newsletters, focusing on the most meaningful, interesting, and useful information. It uniquely parses the entire newsletter to provide concise summaries, lists of content, opinions, tools, companies, and follow-up actions. The output includes sections for a brief summary, detailed content points, author opinions, mentioned tools and companies, and recommended follow-ups in a structured Markdown format.
Extracts and organizes key content from newsletters into a structured, easy-to-navigate format. It focuses on summarizing, categorizing, and highlighting essential information, including opinions, tools, and companies mentioned. The output is a comprehensive breakdown of the newsletter's content for quick reference.
## summarize_paper
Summarizes academic papers by extracting key sections such as title, authors, main goals, and more from the provided text. It employs a structured approach to highlight the paper's core aspects including technical methodology, distinctive features, and experimental outcomes. The output is a detailed summary covering various dimensions of the research.
Generates a summary of an academic paper from its full text, focusing on key sections like title, authors, main goals, and findings. It uniquely structures the output into specific categories for clarity. The expected output includes sections on the paper's title, authors, main goal, technical approach, distinctive features, experimental results, advantages, limitations, and conclusion.
## summarize_pattern
This prompt instructs on summarizing AI chat prompts into concise paragraphs. It emphasizes using active voice and present tense for clarity. The expected output is a structured summary highlighting the prompt's purpose, approach, and anticipated results.
## summarize_prompt
This prompt instructs on summarizing AI chat prompts concisely. It emphasizes using active voice and present tense for clarity. The expected output is a succinct paragraph detailing the prompt's purpose, approach, and anticipated result.
## summarize_pull-requests
Summarizes pull requests for a coding project, focusing on the types of changes made. It involves creating a summary and a detailed list of main PRs, rewritten for clarity. The output includes a concise overview and specific examples of pull requests.
The prompt instructs on summarizing pull requests for a coding project, focusing on creating a summary and detailing top pull requests in a readable format. It emphasizes rewriting pull request items for clarity. The expected output includes a brief overview of the pull requests' nature and a list of major ones, rewritten for readability.
## summarize_rpg_session
This prompt outlines the process for summarizing in-person role-playing game sessions, focusing on key events, combat details, character development, and worldbuilding. It emphasizes capturing the essence of the session in a structured format, including summaries, lists, and descriptions to encapsulate the narrative and gameplay dynamics. The expected output includes a comprehensive overview of the session's storyline, character interactions, and significant moments, tailored for both players and observers.
Summarizes in-person role-playing game sessions, focusing on key events, combat details, character development, and worldbuilding. It transforms RPG transcripts into structured summaries, highlighting significant moments and character evolution. The output includes a heroic summary, detailed combat stats, MVPs, key discussions, character flaws, changes, quotes, humor, and worldbuilding insights.
## to_flashcards
Creates Anki cards from texts following specific principles to ensure simplicity, optimized wording, and no reliance on external context. This approach aims to enhance learning efficiency and comprehension without requiring prior knowledge of the text. The expected output is a set of questions and answers formatted as a CSV table.
Creates Anki cards from texts, adhering to principles of minimal information, optimized wording, and no external context. This approach ensures simplicity without losing essential details, aiming for quick and accurate recall. The output is a set of questions and answers formatted as a CSV table.
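A minimal Go sketch of the expected CSV shape follows, using the standard library's encoding/csv; the sample cards are invented for illustration.

```go
// Invented sample cards; only the question/answer CSV layout matters here.
package main

import (
	"encoding/csv"
	"os"
)

func main() {
	w := csv.NewWriter(os.Stdout)
	defer w.Flush()
	// Header row, then one card per row.
	_ = w.Write([]string{"Question", "Answer"})
	_ = w.Write([]string{"What year did World War II end?", "1945"})
	_ = w.Write([]string{"Who founded information theory?", "Claude Shannon"})
}
```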
## tweet
Guides users on crafting engaging tweets with emojis, focusing on Twitter's basics and content creation strategies. It emphasizes understanding Twitter, identifying the target audience, and using emojis effectively. The expected output is a comprehensive guide for creating appealing tweets with emojis.
Guides users on crafting engaging tweets with emojis, starting from understanding Twitter basics to analyzing tweet performance. It emphasizes concise messaging, audience engagement, and the strategic use of emojis for personality and clarity. The expected output is enhanced tweeting skills and better audience interaction.
## write_essay
The task is to write an essay in the style of Paul Graham, focusing on the essence and approach of writing concise, clear, and illuminating essays on any given topic.
The purpose of this prompt is to generate an essay in the style of Paul Graham, focusing on a given topic while emulating his clear, simple, and conversational writing style. The essay should avoid cliches, jargon, and journalistic language, presenting ideas in a straightforward manner without common concluding phrases.
## write_hackerone_report
Assists bug bounty hunters in writing reports for HackerOne by analyzing requests, responses, and comments to generate a structured report. It leverages the `bbReportFormatter` tool for formatting inputs, facilitating dynamic, plugin-integrated, or command-line report generation. The output is a HackerOne-ready report that can be fine-tuned with additional details.
## write_micro_essay
The task is to write an essay in the style of Paul Graham, focusing on the essence of simplicity in conveying complex ideas.
The purpose of this prompt is to generate an essay in the style of Paul Graham, focusing on the topic provided, using a simple, clear, and conversational style. The essay should avoid cliches, jargon, and journalistic language, aiming for a publish-ready piece that reflects Graham's approach to writing. The content should be concise, limited to 250 words, and exclude common concluding phrases or setup language.
## write_nuclei_template_rule
The purpose of this prompt is to guide the creation of Nuclei templates for cybersecurity applications, focusing on generating precise and efficient scanning templates for various protocols like HTTP, DNS, TCP, and more. It emphasizes the importance of incorporating elements such as matchers, extractors, and conditions to tailor the templates for detecting specific vulnerabilities or configurations. The expected output is a well-structured YAML Nuclei template that adheres to best practices in template creation, including handling dynamic data extraction, utilizing complex matchers, and ensuring accurate vulnerability detection with minimal false positives.
```yaml
id: vhost-enum-flow

info:
  name: vhost enum flow
  author: tarunKoyalwar
  severity: info
  description: |
    vhost enumeration by extracting potential vhost names from ssl certificate.

flow: |
  ssl();
  for (let vhost of iterate(template["ssl_domains"])) {
    set("vhost", vhost);
    http();
  }

ssl:
  - address: "{{Host}}:{{Port}}"

http:
  - raw:
      - |
        GET / HTTP/1.1
        Host: {{vhost}}

    matchers:
      - type: dsl
        dsl:
          - status_code != 400
          - status_code != 502

    extractors:
      - type: dsl
        dsl:
          - '"VHOST: " + vhost + ", SC: " + status_code + ", CL: " + content_length'
```
## write_pull-request
The prompt instructs on drafting a detailed pull request (PR) description based on the output of a `git diff` command, focusing on identifying and explaining code changes. It emphasizes analyzing changes, understanding their purpose, and detailing their impact on the project. The expected output is a structured PR description in markdown, covering a summary of changes, reasons, impacts, and testing plans in clear language.
The prompt instructs a software engineer to draft a detailed pull request description based on the output of a `git diff` command, which compares changes between the current branch and the main repository branch. It emphasizes analyzing the changes, understanding their purpose, and clearly documenting them in markdown format, including summaries, reasons, impacts, and testing plans. The expected output is a structured PR description that concisely communicates the modifications and their implications for the project.
## write_semgrep_rule
The prompt requests the creation of a Semgrep rule to detect a specific vulnerability pattern in code, based on provided context and examples. It emphasizes the importance of crafting a rule that is general enough to catch any instance of the described vulnerability, rather than being overly specific to the given examples. The expected output is a well-structured Semgrep rule that aligns with the syntax and guidelines detailed in the context, capable of identifying the vulnerability across different scenarios.
The prompt requests the creation of a Semgrep rule to detect a specific vulnerability pattern in code, based on provided context and examples. It emphasizes the importance of capturing the general case of the vulnerability rather than focusing solely on the specific instances mentioned. The expected output is a well-structured Semgrep rule that aligns with the syntax and capabilities outlined in the detailed Semgrep rule syntax guide, capable of identifying potential security issues in code.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 8 16-word bullets describing how well or poorly I'm addressing my challenges. Call me out if I'm not putting work into them, and/or if you can see evidence of them affecting me in my journal or elsewhere.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Check this person's Metrics or KPIs (M's or K's) to see their current state and if they've been improved recently.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Analyze everything in my TELOS file and think about what I could and should do after my legacy corporate / technical skills are automated away. What can I contribute that's based on human-to-human interaction and exchanges of value?
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 4 32-word bullets describing who I am and what I do in a non-douchey way. Use the who I am, the problem I see in the world, and what I'm doing about it as the template. Something like:
a. I'm a programmer by trade, and one thing that really bothers me is kids being so stuck inside of tech and games. So I started a school where I teach kids to build things with their hands.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 5 16-word bullets describing this person's life outlook.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 5 16-word bullets describing who this person is, what they do, and what they're working on. The goal is to concisely and confidently project who they are while being humble and grounded.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 5 48-word bullet points, each including a 3-5 word panel title, that would be wonderful panels for this person to participate on.
5. Write them so that they'd be good panels for others to participate in as well, not just me.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 8 16-word bullets describing possible blindspots in my thinking, i.e., flaws in my frames or models that might leave me exposed to error or risk.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 4 16-word bullets identifying negative thinking either in my main document or in my journal.
5. Add some tough love encouragement (not fluff) to help get me out of that mindset.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 5 16-word bullets describing which of their goals and/or projects don't seem to have been worked on recently.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 8 16-word bullets looking at what I'm trying to do, and any progress I've made, and give some encouragement on the positive aspects and recommendations to continue the work.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 4 16-word bullets red-teaming my thinking, models, frames, etc, especially as evidenced throughout my journal.
5. Give a set of recommendations on how to fix the issues identified in the red-teaming.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 8 16-word bullets threat modeling my life plan and what could go wrong.
5. Provide recommendations on how to address the threats and improve the life plan.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,15 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Create an ASCII art diagram of the relationships between my missions, goals, and projects.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -0,0 +1,16 @@
# IDENTITY
You are an expert at understanding deep context about a person or entity, and then creating wisdom from that context combined with the instruction or question given in the input.
# STEPS
1. Read the incoming TELOS File thoroughly. Fully understand everything about this person or entity.
2. Deeply study the input instruction or question.
3. Spend significant time and effort thinking about how these two are related, and what would be the best possible output for the person who sent the input.
4. Write 8 16-word bullets describing what you accomplished this year.
5. End with an ASCII art visualization of what you worked on and accomplished vs. what you didn't work on or finish.
# OUTPUT INSTRUCTIONS
1. Only use basic markdown formatting. No special formatting or italics or bolding or anything.
2. Only output the list, nothing else.

View File

@@ -1 +1 @@
"1.4.129"
"1.4.136"

View File

@@ -3,6 +3,7 @@ package anthropic
import (
"context"
"fmt"
"strings"
"github.com/anthropics/anthropic-sdk-go"
"github.com/anthropics/anthropic-sdk-go/option"
@@ -11,7 +12,7 @@ import (
goopenai "github.com/sashabaranov/go-openai"
)
//const baseUrl = "https://api.anthropic.com/"
const defaultBaseUrl = "https://api.anthropic.com/"
func NewClient() (ret *Client) {
vendorName := "Anthropic"
@@ -23,11 +24,10 @@ func NewClient() (ret *Client) {
ConfigureCustom: ret.configure,
}
//ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", false)
//ret.ApiBaseURL.Value = baseUrl
ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", false)
ret.ApiBaseURL.Value = defaultBaseUrl
ret.ApiKey = ret.PluginBase.AddSetupQuestion("API key", true)
// we could provide a setup question for the following settings
ret.maxTokens = 4096
ret.defaultRequiredUserMessage = "Hi"
ret.models = []string{
@@ -44,8 +44,8 @@ func NewClient() (ret *Client) {
type Client struct {
*plugins.PluginBase
//ApiBaseURL *plugins.SetupQuestion
ApiKey *plugins.SetupQuestion
ApiBaseURL *plugins.SetupQuestion
ApiKey *plugins.SetupQuestion
maxTokens int
defaultRequiredUserMessage string
@@ -55,14 +55,23 @@ type Client struct {
}
func (an *Client) configure() (err error) {
/*if an.ApiBaseURL.Value != "" {
if an.ApiBaseURL.Value != "" {
baseURL := an.ApiBaseURL.Value
if strings.Contains(baseURL, "-") && !strings.HasSuffix(baseURL, "/v1") {
if strings.HasSuffix(baseURL, "/") {
baseURL = strings.TrimSuffix(baseURL, "/")
}
baseURL = baseURL + "/v1"
}
an.client = anthropic.NewClient(
option.WithAPIKey(an.ApiKey.Value), option.WithBaseURL(an.ApiBaseURL.Value),
option.WithAPIKey(an.ApiKey.Value),
option.WithBaseURL(baseURL),
)
} else {
*/
an.client = anthropic.NewClient(option.WithAPIKey(an.ApiKey.Value))
//}
an.client = anthropic.NewClient(option.WithAPIKey(an.ApiKey.Value))
}
return
}
@@ -73,7 +82,6 @@ func (an *Client) ListModels() (ret []string, err error) {
func (an *Client) SendStream(
msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string,
) (err error) {
messages := an.toMessages(msgs)
ctx := context.Background()
@@ -121,10 +129,8 @@ func (an *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessa
}
func (an *Client) toMessages(msgs []*goopenai.ChatCompletionMessage) (ret []anthropic.MessageParam) {
// we could call the method before calling the specific vendor
normalizedMessages := common.NormalizeMessages(msgs, an.defaultRequiredUserMessage)
// Iterate over the incoming session messages and process them
for _, msg := range normalizedMessages {
var message anthropic.MessageParam
switch msg.Role {

View File

@@ -0,0 +1,65 @@
package anthropic
import (
"testing"
)
// Test generated using Keploy
func TestNewClient_DefaultInitialization(t *testing.T) {
client := NewClient()
if client == nil {
t.Fatal("Expected client to be initialized, got nil")
}
if client.ApiBaseURL.Value != defaultBaseUrl {
t.Errorf("Expected default API Base URL to be %s, got %s", defaultBaseUrl, client.ApiBaseURL.Value)
}
if client.maxTokens != 4096 {
t.Errorf("Expected default maxTokens to be 4096, got %d", client.maxTokens)
}
if len(client.models) == 0 {
t.Error("Expected models to be initialized with default values, got empty list")
}
}
// Test generated using Keploy
func TestClientListModels(t *testing.T) {
client := NewClient()
models, err := client.ListModels()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if len(models) != len(client.models) {
t.Errorf("Expected %d models, got %d", len(client.models), len(models))
}
for i, model := range models {
if model != client.models[i] {
t.Errorf("Expected model at index %d to be %s, got %s", i, client.models[i], model)
}
}
}
func TestClient_ListModels_ReturnsCorrectModels(t *testing.T) {
client := NewClient()
models, err := client.ListModels()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
if len(models) != len(client.models) {
t.Errorf("Expected %d models, got %d", len(client.models), len(models))
}
for i, model := range models {
if model != client.models[i] {
t.Errorf("Expected model %s at index %d, got %s", client.models[i], i, model)
}
}
}

View File

@@ -3,9 +3,8 @@ package azure
import (
"github.com/danielmiessler/fabric/plugins"
"github.com/danielmiessler/fabric/plugins/ai/openai"
"strings"
goopenai "github.com/sashabaranov/go-openai"
"strings"
)
func NewClient() (ret *Client) {
@@ -31,8 +30,6 @@ func (oi *Client) configure() (err error) {
}
func (oi *Client) ListModels() (ret []string, err error) {
// Azure Open Client has models and deployments. We need to get the deployments to use them for chat
// There is no easy way to get the deployments from the API, so we need to ask the user to provide them
ret = oi.apiDeployments
return
}

View File

@@ -0,0 +1,67 @@
package azure
import (
"testing"
)
// Test generated using Keploy
func TestNewClientInitialization(t *testing.T) {
client := NewClient()
if client == nil {
t.Fatalf("Expected non-nil client, got nil")
}
if client.ApiDeployments == nil {
t.Errorf("Expected ApiDeployments to be initialized, got nil")
}
if client.Client == nil {
t.Errorf("Expected Client to be initialized, got nil")
}
}
// Test generated using Keploy
func TestClientConfigure(t *testing.T) {
client := NewClient()
client.ApiDeployments.Value = "deployment1,deployment2"
client.ApiKey.Value = "test-api-key"
client.ApiBaseURL.Value = "https://example.com"
err := client.configure()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
expectedDeployments := []string{"deployment1", "deployment2"}
if len(client.apiDeployments) != len(expectedDeployments) {
t.Errorf("Expected %d deployments, got %d", len(expectedDeployments), len(client.apiDeployments))
}
for i, deployment := range expectedDeployments {
if client.apiDeployments[i] != deployment {
t.Errorf("Expected deployment %s, got %s", deployment, client.apiDeployments[i])
}
}
if client.ApiClient == nil {
t.Errorf("Expected ApiClient to be initialized, got nil")
}
}
// Test generated using Keploy
func TestListModels(t *testing.T) {
client := NewClient()
client.apiDeployments = []string{"deployment1", "deployment2"}
models, err := client.ListModels()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
expectedModels := []string{"deployment1", "deployment2"}
if len(models) != len(expectedModels) {
t.Errorf("Expected %d models, got %d", len(expectedModels), len(models))
}
for i, model := range expectedModels {
if models[i] != model {
t.Errorf("Expected model %s, got %s", model, models[i])
}
}
}

View File

@@ -0,0 +1,15 @@
package deepseek
import (
"github.com/danielmiessler/fabric/plugins/ai/openai"
)
func NewClient() (ret *Client) {
ret = &Client{}
ret.Client = openai.NewClientCompatible("DeepSeek", "https://api.deepseek.com", nil)
return
}
type Client struct {
*openai.Client
}

View File

@@ -0,0 +1,13 @@
package deepseek
// Test generated using Keploy
import (
"testing"
)
func TestNewClient_EmbeddedClientNotNil(t *testing.T) {
client := NewClient()
if client.Client == nil {
t.Fatalf("Expected embedded openai.Client to be non-nil, got nil")
}
}

View File

@@ -0,0 +1,55 @@
package dryrun
import (
"github.com/danielmiessler/fabric/common"
"github.com/sashabaranov/go-openai"
"reflect"
"testing"
)
// Test generated using Keploy
func TestListModels_ReturnsExpectedModel(t *testing.T) {
client := NewClient()
models, err := client.ListModels()
if err != nil {
t.Fatalf("Expected no error, got %v", err)
}
expected := []string{"dry-run-model"}
if !reflect.DeepEqual(models, expected) {
t.Errorf("Expected %v, got %v", expected, models)
}
}
// Test generated using Keploy
func TestSetup_ReturnsNil(t *testing.T) {
client := NewClient()
err := client.Setup()
if err != nil {
t.Errorf("Expected nil error, got %v", err)
}
}
// Test generated using Keploy
func TestSendStream_SendsMessages(t *testing.T) {
client := NewClient()
msgs := []*openai.ChatCompletionMessage{
{Role: "user", Content: "Test message"},
}
opts := &common.ChatOptions{
Model: "dry-run-model",
}
channel := make(chan string)
go func() {
err := client.SendStream(msgs, opts, channel)
if err != nil {
t.Errorf("Expected no error, got %v", err)
}
}()
var receivedMessages []string
for msg := range channel {
receivedMessages = append(receivedMessages, msg)
}
if len(receivedMessages) == 0 {
t.Errorf("Expected to receive messages, but got none")
}
}

View File

@@ -0,0 +1,43 @@
package gemini
import (
"github.com/google/generative-ai-go/genai"
"testing"
)
// Test generated using Keploy
func TestBuildModelNameSimple(t *testing.T) {
client := &Client{}
fullModelName := "models/chat-bison-001"
expected := "chat-bison-001"
result := client.buildModelNameSimple(fullModelName)
if result != expected {
t.Errorf("Expected %v, got %v", expected, result)
}
}
// Test generated using Keploy
func TestExtractText(t *testing.T) {
client := &Client{}
response := &genai.GenerateContentResponse{
Candidates: []*genai.Candidate{
{
Content: &genai.Content{
Parts: []genai.Part{
genai.Text("Hello, "),
genai.Text("world!"),
},
},
},
},
}
expected := "Hello, world!"
result := client.extractText(response)
if result != expected {
t.Errorf("Expected %v, got %v", expected, result)
}
}

View File

@@ -4,12 +4,14 @@ import (
"github.com/danielmiessler/fabric/plugins/ai/openai"
)
// NewClient initializes and returns a new Groq Client.
func NewClient() (ret *Client) {
ret = &Client{}
ret.Client = openai.NewClientCompatible("Groq", "https://api.groq.com/openai/v1", nil)
return
}
// Client wraps the openai.Client to provide additional functionality specific to Groq.
type Client struct {
*openai.Client
}

View File

@@ -0,0 +1,13 @@
package groq
// Test generated using Keploy
import (
"testing"
)
func TestNewClientEmbeddedClientNotNil(t *testing.T) {
client := NewClient()
if client.Client == nil {
t.Fatalf("Expected embedded openai.Client to be non-nil, got nil")
}
}

View File

@@ -0,0 +1,358 @@
package lmstudio
import (
"bufio"
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"net/http"
goopenai "github.com/sashabaranov/go-openai"
"github.com/danielmiessler/fabric/common"
"github.com/danielmiessler/fabric/plugins"
)
// NewClient creates a new LM Studio client with default configuration.
func NewClient() (ret *Client) {
return NewClientCompatible("LM Studio", "http://localhost:1234/v1", nil)
}
// NewClientCompatible creates a new LM Studio client with custom configuration.
func NewClientCompatible(vendorName string, defaultBaseUrl string, configureCustom func() error) (ret *Client) {
ret = &Client{}
if configureCustom == nil {
configureCustom = ret.configure
}
ret.PluginBase = &plugins.PluginBase{
Name: vendorName,
EnvNamePrefix: plugins.BuildEnvVariablePrefix(vendorName),
ConfigureCustom: configureCustom,
}
ret.ApiBaseURL = ret.AddSetupQuestion("API Base URL", false)
ret.ApiBaseURL.Value = defaultBaseUrl
return
}
// Client represents the LM Studio client.
type Client struct {
*plugins.PluginBase
ApiBaseURL *plugins.SetupQuestion
HttpClient *http.Client
}
// configure sets up the HTTP client.
func (c *Client) configure() error {
c.HttpClient = &http.Client{}
return nil
}
// Configure sets up the client configuration.
func (c *Client) Configure() error {
return c.ConfigureCustom()
}
// ListModels returns a list of available models.
func (c *Client) ListModels() ([]string, error) {
url := fmt.Sprintf("%s/models", c.ApiBaseURL.Value)
req, err := http.NewRequest("GET", url, nil)
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
resp, err := c.HttpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
var result struct {
Data []struct {
ID string `json:"id"`
} `json:"data"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
models := make([]string, len(result.Data))
for i, model := range result.Data {
models[i] = model.ID
}
return models, nil
}
// // SendStream sends a stream of messages (not implemented for LM Studio).
// func (c *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) error {
// return fmt.Errorf("streaming is not currently supported for LM Studio")
// }
func (c *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) error {
url := fmt.Sprintf("%s/chat/completions", c.ApiBaseURL.Value)
payload := map[string]interface{}{
"messages": msgs,
"model": opts.Model,
"stream": true, // Enable streaming
}
jsonPayload, err := json.Marshal(payload)
if err != nil {
return fmt.Errorf("failed to marshal payload: %w", err)
}
req, err := http.NewRequest("POST", url, bytes.NewBuffer(jsonPayload))
if err != nil {
return fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.HttpClient.Do(req)
if err != nil {
return fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
// Close the channel when the function exits (including error paths),
// so consumers ranging over it do not block forever
defer close(channel)
if resp.StatusCode != http.StatusOK {
return fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
reader := bufio.NewReader(resp.Body)
for {
line, err := reader.ReadBytes('\n')
if err != nil {
if err == io.EOF {
break
}
return fmt.Errorf("error reading response: %w", err)
}
// Trim the trailing newline kept by ReadBytes, then ignore empty keep-alive lines
// (without the trim, the empty-line and [DONE] checks below never match)
line = bytes.TrimSpace(line)
if len(line) == 0 {
continue
}
// Remove OpenAI-style "data: " prefix
if bytes.HasPrefix(line, []byte("data: ")) {
line = bytes.TrimPrefix(line, []byte("data: "))
}
// Handle the [DONE] signal
if string(line) == "[DONE]" {
break
}
// Parse JSON response
var result map[string]interface{}
if err := json.Unmarshal(line, &result); err != nil {
continue
}
// Extract content from streaming chunks
choices, ok := result["choices"].([]interface{})
if !ok || len(choices) == 0 {
continue
}
delta, ok := choices[0].(map[string]interface{})["delta"].(map[string]interface{})
if !ok {
continue
}
content, _ := delta["content"].(string)
// Send data to channel
channel <- content
}
return nil
}
// Send sends a single message and returns the response.
func (c *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (string, error) {
url := fmt.Sprintf("%s/chat/completions", c.ApiBaseURL.Value)
payload := map[string]interface{}{
"messages": msgs,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
}
jsonPayload, err := json.Marshal(payload)
if err != nil {
return "", fmt.Errorf("failed to marshal payload: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonPayload))
if err != nil {
return "", fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.HttpClient.Do(req)
if err != nil {
return "", fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
choices, ok := result["choices"].([]interface{})
if !ok || len(choices) == 0 {
return "", fmt.Errorf("invalid response format: missing or empty choices")
}
message, ok := choices[0].(map[string]interface{})["message"].(map[string]interface{})
if !ok {
return "", fmt.Errorf("invalid response format: missing message in first choice")
}
content, ok := message["content"].(string)
if !ok {
return "", fmt.Errorf("invalid response format: missing or non-string content in message")
}
return content, nil
}
// Complete sends a completion request and returns the response.
func (c *Client) Complete(ctx context.Context, prompt string, opts *common.ChatOptions) (string, error) {
url := fmt.Sprintf("%s/completions", c.ApiBaseURL.Value)
payload := map[string]interface{}{
"prompt": prompt,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
}
jsonPayload, err := json.Marshal(payload)
if err != nil {
return "", fmt.Errorf("failed to marshal payload: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonPayload))
if err != nil {
return "", fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.HttpClient.Do(req)
if err != nil {
return "", fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return "", fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
var result map[string]interface{}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return "", fmt.Errorf("failed to decode response: %w", err)
}
choices, ok := result["choices"].([]interface{})
if !ok || len(choices) == 0 {
return "", fmt.Errorf("invalid response format: missing or empty choices")
}
text, ok := choices[0].(map[string]interface{})["text"].(string)
if !ok {
return "", fmt.Errorf("invalid response format: missing or non-string text in first choice")
}
return text, nil
}
// GetEmbeddings returns embeddings for the given input.
func (c *Client) GetEmbeddings(ctx context.Context, input string, opts *common.ChatOptions) ([]float64, error) {
url := fmt.Sprintf("%s/embeddings", c.ApiBaseURL.Value)
payload := map[string]interface{}{
"input": input,
"model": opts.Model,
// Add other options from opts if supported by LM Studio
}
jsonPayload, err := json.Marshal(payload)
if err != nil {
return nil, fmt.Errorf("failed to marshal payload: %w", err)
}
req, err := http.NewRequestWithContext(ctx, "POST", url, bytes.NewBuffer(jsonPayload))
if err != nil {
return nil, fmt.Errorf("failed to create request: %w", err)
}
req.Header.Set("Content-Type", "application/json")
resp, err := c.HttpClient.Do(req)
if err != nil {
return nil, fmt.Errorf("failed to send request: %w", err)
}
defer resp.Body.Close()
if resp.StatusCode != http.StatusOK {
return nil, fmt.Errorf("unexpected status code: %d", resp.StatusCode)
}
var result struct {
Data []struct {
Embedding []float64 `json:"embedding"`
} `json:"data"`
}
if err := json.NewDecoder(resp.Body).Decode(&result); err != nil {
return nil, fmt.Errorf("failed to decode response: %w", err)
}
if len(result.Data) == 0 {
return nil, fmt.Errorf("no embeddings returned")
}
return result.Data[0].Embedding, nil
}
// GetName returns the name of the vendor.
func (c *Client) GetName() string {
return c.Name
}
// IsConfigured checks if the client is configured.
func (c *Client) IsConfigured() bool {
return c.ApiBaseURL != nil && c.ApiBaseURL.Value != ""
}
// Setup performs any necessary setup for the client.
func (c *Client) Setup() error {
return c.Configure()
}
// SetupFillEnvFileContent fills the environment file content.
func (c *Client) SetupFillEnvFileContent(buffer *bytes.Buffer) {
envName := fmt.Sprintf("%s_API_BASE_URL", c.EnvNamePrefix)
buffer.WriteString(fmt.Sprintf("%s=%s\n", envName, c.ApiBaseURL.Value))
}

View File

@@ -0,0 +1 @@
package openrouter

View File

@@ -107,7 +107,7 @@ func (o *PatternsEntity) getFromDB(name string) (ret *Pattern, err error) {
func (o *PatternsEntity) PrintLatestPatterns(latestNumber int) (err error) {
var contents []byte
if contents, err = os.ReadFile(o.UniquePatternsFilePath); err != nil {
err = fmt.Errorf("could not read unique patterns file. Pleas run --updatepatterns (%s)", err)
err = fmt.Errorf("could not read unique patterns file. Please run --updatepatterns (%s)", err)
return
}
uniquePatterns := strings.Split(string(contents), "\n")

View File

@@ -1,3 +1,23 @@
// to_pdf
//
// Usage:
// [no args] Read from stdin, write to output.pdf
// <file.tex> Read from .tex file, write to <file>.pdf
// <output.pdf> Read stdin, write to specified PDF
// <output> Read stdin, write to <output>.pdf
// <input> <output> Read input (.tex appended if needed), write to <output> (.pdf appended if needed)
//
// Examples:
// to_pdf # stdin -> output.pdf
// to_pdf doc.tex # doc.tex -> doc.pdf
// to_pdf report # stdin -> report.pdf
// to_pdf chap.tex out # chap.tex -> out.pdf
//
// Error handling:
// - Validates pdflatex installation
// - Creates missing directories
// - Cleans temp files on exit
package main
import (
@@ -9,23 +29,98 @@ import (
"strings"
)
// hasSuffix checks if a string ends with the given suffix, case-insensitive.
func hasSuffix(s, suffix string) bool {
return strings.HasSuffix(strings.ToLower(s), strings.ToLower(suffix))
}
// resolveInputFile attempts to open the input file.
// If tryAppendTex is true and the initial attempt fails, it appends ".tex" and retries.
func resolveInputFile(filename string, tryAppendTex bool) (io.ReadCloser, string) {
file, err := os.Open(filename)
if err == nil {
return file, filename
}
if tryAppendTex {
newFilename := filename + ".tex"
file, err = os.Open(newFilename)
if err == nil {
return file, newFilename
}
}
return nil, ""
}
func main() {
var input io.Reader
var outputFile string
if len(os.Args) > 1 {
// File input mode
file, err := os.Open(os.Args[1])
if err != nil {
fmt.Fprintf(os.Stderr, "Error opening file: %v\n", err)
args := os.Args
argCount := len(args) - 1 // excluding the program name
switch argCount {
case 0:
// Case 1: No arguments
input = os.Stdin
outputFile = "output.pdf"
case 1:
// Case 2: One argument
arg := args[1]
if hasSuffix(arg, ".tex") {
// Case 2a: Argument ends with .tex
file, actualName := resolveInputFile(arg, false)
if file == nil {
fmt.Fprintf(os.Stderr, "Error opening file: %s\n", arg)
os.Exit(1)
}
defer file.Close()
input = file
// Derive output file name by replacing .tex with .pdf
ext := filepath.Ext(actualName)
outputFile = strings.TrimSuffix(actualName, ext) + ".pdf"
} else if hasSuffix(arg, ".pdf") {
// Case 2b: Argument ends with .pdf
input = os.Stdin
outputFile = arg
} else {
// Case 2c: Argument without .pdf
input = os.Stdin
outputFile = arg + ".pdf"
}
case 2:
// Case 3: Two arguments
inputArg := args[1]
outputArg := args[2]
// Resolve input file, ignore actualName
file, _ := resolveInputFile(inputArg, true)
if file == nil {
fmt.Fprintf(os.Stderr, "Error: Input file '%s' not found, even after appending '.tex'.\n", inputArg)
os.Exit(1)
}
defer file.Close()
input = file
outputFile = strings.TrimSuffix(os.Args[1], filepath.Ext(os.Args[1])) + ".pdf"
} else {
// Stdin mode
input = os.Stdin
outputFile = "output.pdf"
// Resolve output file
if hasSuffix(outputArg, ".pdf") {
outputFile = outputArg
} else {
outputFile = outputArg + ".pdf"
}
default:
fmt.Fprintf(os.Stderr, "Usage:\n")
fmt.Fprintf(os.Stderr, " %s # Read from stdin, output to 'output.pdf'\n", args[0])
fmt.Fprintf(os.Stderr, " %s <file.tex> # Read from 'file.tex', output to 'file.pdf'\n", args[0])
fmt.Fprintf(os.Stderr, " %s <output.pdf> # Read from stdin, output to 'output.pdf'\n", args[0])
fmt.Fprintf(os.Stderr, " %s <output> # Read from stdin, output to '<output>.pdf'\n", args[0])
fmt.Fprintf(os.Stderr, " %s <input> <output># Read from 'input' (tries 'input.tex'), output to 'output.pdf'\n", args[0])
os.Exit(1)
}
// Check if pdflatex is installed
@@ -44,7 +139,8 @@ func main() {
defer os.RemoveAll(tmpDir)
// Create a temporary .tex file
tmpFile, err := os.Create(filepath.Join(tmpDir, "input.tex"))
tmpFilePath := filepath.Join(tmpDir, "input.tex")
tmpFile, err := os.Create(tmpFilePath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error creating temporary file: %v\n", err)
os.Exit(1)
@@ -54,12 +150,13 @@ func main() {
_, err = io.Copy(tmpFile, input)
if err != nil {
fmt.Fprintf(os.Stderr, "Error writing to temporary file: %v\n", err)
tmpFile.Close()
os.Exit(1)
}
tmpFile.Close()
// Run pdflatex with nonstopmode
cmd := exec.Command("pdflatex", "-interaction=nonstopmode", "-output-directory", tmpDir, tmpFile.Name())
cmd := exec.Command("pdflatex", "-interaction=nonstopmode", "-output-directory", tmpDir, "input.tex")
output, err := cmd.CombinedOutput()
if err != nil {
fmt.Fprintf(os.Stderr, "Error running pdflatex: %v\n", err)
@@ -75,43 +172,25 @@ func main() {
os.Exit(1)
}
// Move the output PDF to the current directory
// Move the output PDF to the desired location
err = copyFile(pdfPath, outputFile)
if err != nil {
fmt.Fprintf(os.Stderr, "Error moving output file: %v\n", err)
os.Exit(1)
}
// Remove the original file after copying
// Remove the generated PDF from the temporary directory
err = os.Remove(pdfPath)
if err != nil {
fmt.Fprintf(os.Stderr, "Error cleaning up temporary file: %v\n", err)
os.Exit(1)
// Not exiting as the main process succeeded
}
// Clean up temporary files
cleanupTempFiles(tmpDir)
fmt.Printf("PDF created: %s\n", outputFile)
}
func cleanupTempFiles(dir string) {
extensions := []string{".aux", ".log", ".out", ".toc", ".lof", ".lot", ".bbl", ".blg"}
for _, ext := range extensions {
files, err := filepath.Glob(filepath.Join(dir, "*"+ext))
if err != nil {
fmt.Fprintf(os.Stderr, "Error finding %s files: %v\n", ext, err)
continue
}
for _, file := range files {
if err := os.Remove(file); err != nil {
fmt.Fprintf(os.Stderr, "Error removing file %s: %v\n", file, err)
}
}
}
}
// Copy a file from source src to destination dst
// copyFile copies a file from src to dst.
// If dst exists, it will be overwritten.
func copyFile(src, dst string) error {
sourceFile, err := os.Open(src)
if err != nil {
@@ -119,6 +198,13 @@ func copyFile(src, dst string) error {
}
defer sourceFile.Close()
// Ensure the destination directory exists
dstDir := filepath.Dir(dst)
err = os.MkdirAll(dstDir, 0755)
if err != nil {
return err
}
destFile, err := os.Create(dst)
if err != nil {
return err
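The new MkdirAll guard is what makes nested output paths such as `out/chap.pdf` work: the destination directory is created before the file. A brief illustrative call, with hypothetical paths:

// "reports/2025" is created with mode 0755 if it does not already exist
if err := copyFile("/tmp/build/input.pdf", "reports/2025/out.pdf"); err != nil {
	fmt.Fprintf(os.Stderr, "copy failed: %v\n", err)
	os.Exit(1)
}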

View File

@@ -113,6 +113,45 @@ func (o *YouTube) GrabTranscript(videoId string, language string) (ret string, e
return
}
func (o *YouTube) GrabTranscriptWithTimestamps(videoId string, language string) (ret string, err error) {
var transcript string
if transcript, err = o.GrabTranscriptBase(videoId, language); err != nil {
err = fmt.Errorf("transcript not available. (%v)", err)
return
}
// Parse the XML transcript
doc := soup.HTMLParse(transcript)
// Extract the text content from the <text> tags
textTags := doc.FindAll("text")
var textBuilder strings.Builder
for _, textTag := range textTags {
// Extract the start and duration attributes
start := textTag.Attrs()["start"]
dur := textTag.Attrs()["dur"]
end := fmt.Sprintf("%f", parseFloat(start)+parseFloat(dur))
// Format the timestamps
startFormatted := formatTimestamp(parseFloat(start))
endFormatted := formatTimestamp(parseFloat(end))
text := strings.ReplaceAll(textTag.Text(), "&#39;", "'")
textBuilder.WriteString(fmt.Sprintf("[%s - %s] %s\n", startFormatted, endFormatted, text))
}
ret = textBuilder.String()
return
}
func parseFloat(s string) float64 {
f, _ := strconv.ParseFloat(s, 64)
return f
}
func formatTimestamp(seconds float64) string {
hours := int(seconds) / 3600
minutes := (int(seconds) % 3600) / 60
secs := int(seconds) % 60
return fmt.Sprintf("%02d:%02d:%02d", hours, minutes, secs)
}
func (o *YouTube) GrabTranscriptBase(videoId string, language string) (ret string, err error) {
if err = o.initService(); err != nil {
return
@@ -265,6 +304,13 @@ func (o *YouTube) Grab(url string, options *Options) (ret *VideoInfo, err error)
return
}
}
if options.TranscriptWithTimestamps {
if ret.Transcript, err = o.GrabTranscriptWithTimestamps(videoId, "en"); err != nil {
return
}
}
return
}
@@ -372,11 +418,12 @@ type VideoMeta struct {
}
type Options struct {
Duration bool
Transcript bool
Comments bool
Lang string
Metadata bool
Duration bool
Transcript bool
TranscriptWithTimestamps bool
Comments bool
Lang string
Metadata bool
}
type VideoInfo struct {
@@ -437,6 +484,7 @@ func (o *YouTube) GrabByFlags() (ret *VideoInfo, err error) {
options := &Options{}
flag.BoolVar(&options.Duration, "duration", false, "Output only the duration")
flag.BoolVar(&options.Transcript, "transcript", false, "Output only the transcript")
flag.BoolVar(&options.TranscriptWithTimestamps, "transcriptWithTimestamps", false, "Output only the transcript with timestamps")
flag.BoolVar(&options.Comments, "comments", false, "Output the comments on the video")
flag.StringVar(&options.Lang, "lang", "en", "Language for the transcript (default: English)")
flag.BoolVar(&options.Metadata, "metadata", false, "Output video metadata")
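For reference, the timestamp helpers floor to whole seconds and format as HH:MM:SS, so a cue with start="3725.4" and dur="2.0" renders as [01:02:05 - 01:02:07]. A small illustrative check, with hypothetical values:

start, dur := 3725.4, 2.0
fmt.Printf("[%s - %s]\n", formatTimestamp(start), formatTimestamp(start+dur))
// Output: [01:02:05 - 01:02:07]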

View File

@@ -191,6 +191,26 @@ func writeSSEResponse(w gin.ResponseWriter, response StreamResponse) error {
return nil
}
/*
func detectFormat(content string) string {
if strings.HasPrefix(content, "graph TD") ||
strings.HasPrefix(content, "gantt") ||
strings.HasPrefix(content, "flowchart") ||
strings.HasPrefix(content, "sequenceDiagram") ||
strings.HasPrefix(content, "classDiagram") ||
strings.HasPrefix(content, "stateDiagram") {
return "mermaid"
}
if strings.Contains(content, "```") ||
strings.Contains(content, "#") ||
strings.Contains(content, "*") ||
strings.Contains(content, "_") ||
strings.Contains(content, "-") {
return "markdown"
}
return "plain"
}
*/
func detectFormat(content string) string {
if strings.HasPrefix(content, "graph TD") ||
strings.HasPrefix(content, "gantt") ||
@@ -200,12 +220,5 @@ func detectFormat(content string) string {
strings.HasPrefix(content, "stateDiagram") {
return "mermaid"
}
if strings.Contains(content, "```") ||
strings.Contains(content, "#") ||
strings.Contains(content, "*") ||
strings.Contains(content, "_") ||
strings.Contains(content, "-") {
return "markdown"
}
return "plain"
return "markdown"
}
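With this change every non-Mermaid response is treated as markdown; the old content sniffing (kept above, commented out, for reference) is gone, so the "plain" format no longer occurs. A hedged sketch of the new behavior as a table-driven test, in a hypothetical test file using the standard testing package:

func TestDetectFormat(t *testing.T) {
	cases := map[string]string{
		"graph TD; A-->B":   "mermaid",
		"sequenceDiagram":   "mermaid",
		"Just plain prose.": "markdown", // previously classified as "plain"
	}
	for in, want := range cases {
		if got := detectFormat(in); got != want {
			t.Errorf("detectFormat(%q) = %q, want %q", in, got, want)
		}
	}
}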

View File

@@ -44,6 +44,7 @@ func (h *ConfigHandler) GetConfig(c *gin.Context) {
"ollama": "",
"openrouter": "",
"silicon": "",
"deepseek": "",
})
return
}
@@ -63,6 +64,8 @@ func (h *ConfigHandler) GetConfig(c *gin.Context) {
"ollama": os.Getenv("OLLAMA_URL"),
"openrouter": os.Getenv("OPENROUTER_API_KEY"),
"silicon": os.Getenv("SILICON_API_KEY"),
"deepseek": os.Getenv("DEEPSEEK_API_KEY"),
"lmstudio": os.Getenv("LM_STUDIO_API_BASE_URL"),
}
c.JSON(http.StatusOK, config)
@@ -83,6 +86,8 @@ func (h *ConfigHandler) UpdateConfig(c *gin.Context) {
OllamaURL string `json:"ollama_url"`
OpenRouterApiKey string `json:"openrouter_api_key"`
SiliconApiKey string `json:"silicon_api_key"`
DeepSeekApiKey string `json:"deepseek_api_key"`
LMStudioURL string `json:"lm_studio_base_url"`
}
if err := c.BindJSON(&config); err != nil {
@@ -91,14 +96,16 @@ func (h *ConfigHandler) UpdateConfig(c *gin.Context) {
}
envVars := map[string]string{
"OPENAI_API_KEY": config.OpenAIApiKey,
"ANTHROPIC_API_KEY": config.AnthropicApiKey,
"GROQ_API_KEY": config.GroqApiKey,
"MISTRAL_API_KEY": config.MistralApiKey,
"GEMINI_API_KEY": config.GeminiApiKey,
"OLLAMA_URL": config.OllamaURL,
"OPENROUTER_API_KEY": config.OpenRouterApiKey,
"SILICON_API_KEY": config.SiliconApiKey,
"OPENAI_API_KEY": config.OpenAIApiKey,
"ANTHROPIC_API_KEY": config.AnthropicApiKey,
"GROQ_API_KEY": config.GroqApiKey,
"MISTRAL_API_KEY": config.MistralApiKey,
"GEMINI_API_KEY": config.GeminiApiKey,
"OLLAMA_URL": config.OllamaURL,
"OPENROUTER_API_KEY": config.OpenRouterApiKey,
"SILICON_API_KEY": config.SiliconApiKey,
"DEEPSEEK_API_KEY": config.DeepSeekApiKey,
"LM_STUDIO_API_BASE_URL": config.LMStudioURL,
}
var envContent strings.Builder

View File

@@ -1,3 +1,3 @@
package main
var version = "v1.4.129"
var version = "v1.4.136"

View File

@@ -1,13 +1,25 @@
## The Fabric Web App
# The Fabric Web App
[Installing](#installing)|[Todos](#todos)|[Collaborators](#collaborators)
This is the web app for Fabric. It is built using [Svelte](https://svelte.dev/), [SkeletonUI](https://skeleton.dev/), and [Mdsvex](https://mdsvex.pngwn.io/).
This is a web app for Fabric. It was built using [Svelte](https://svelte.dev/), [SkeletonUI](https://skeleton.dev/), and [Mdsvex](https://mdsvex.pngwn.io/).
The goal of this app is to not only provide a user interface for Fabric, but also a out-of-the-box website for those who want to get started with web development or blogging. The tech stack is minimal and (I hope) the code is easy to read and understand. One thing I kept in mind when making this app was to make it easy for beginners to get started with web development. You can use this app as a GUI interface for Fabric, a ready to go blog-site, or a website template for your own projects. I hope you find it useful!
The goal of this app is to not only provide a user interface for Fabric, but also an out-of-the-box website for those who want to get started with web development or blogging, or who just want a web interface for Fabric. You can use this app as a GUI for Fabric, a ready-to-go blog site, or a website template for your own projects.
![Preview](static/image.png)
![Preview](/fabric-png.png)
## Installing
The app can be run by navigating to the `web` directory, installing dependencies with `npm install`, `pnpm install`, or your preferred package manager, and then running `npm run dev`, `pnpm run dev`, or your equivalent command. *You will need to run fabric in a separate terminal with the `fabric --serve` command.*
## Tips
When creating new posts, make sure to include a date, description, tags, and aliases. Only a date is needed to display a note.
You can include images, links to other articles, code blocks, and more, all within your markdown files.
### If you choose to use Obsidian alongside this app
You can design and order your vault however you like, though a `posts` folder should be kept in your vault to house any articles you'd like to post.
### Installing
It can be installed by navigating to the `web` directory and using `npm install`, `pnpm install`, or your favorite package manager. Then simply run `npm run dev` or your equivalent command to start the app. *You will need to run fabric in a seperate terminal with the `fabric --serve` command.*

View File

@@ -1,21 +0,0 @@
# Security Policy
## Supported Versions
Use this section to tell people about which versions of your project are
currently being supported with security updates.
| Version | Supported |
| ------- | ------------------ |
| 5.1.x | :white_check_mark: |
| 5.0.x | :x: |
| 4.0.x | :white_check_mark: |
| < 4.0 | :x: |
## Reporting a Vulnerability
Use this section to tell people how to report a vulnerability.
Tell them where to go, how often they can expect to get an update on a
reported vulnerability, what to expect if the vulnerability is accepted or
declined, etc.

4
web/markdown.d.ts vendored
View File

@@ -1,4 +0,0 @@
/* declare module '*.md' {
const component: import('svelte').ComponentType;
export default component;
} */

View File


7397
web/package-lock.json generated

File diff suppressed because it is too large

View File

@@ -1,5 +1,5 @@
{
"name": "terminal-blog",
"name": "fabric",
"version": "0.0.1",
"private": true,
"scripts": {
@@ -13,22 +13,31 @@
"format": "prettier --write ."
},
"devDependencies": {
"@eslint/js": "^9.17.0",
"@skeletonlabs/skeleton": "^2.8.0",
"@skeletonlabs/tw-plugin": "^0.3.1",
"@sveltejs/adapter-auto": "^3.0.0",
"@sveltejs/kit": "^2.8.4",
"@sveltejs/kit": "^2.9.0",
"@sveltejs/vite-plugin-svelte": "^3.0.0",
"@tailwindcss/forms": "^0.5.7",
"@tailwindcss/typography": "^0.5.10",
"@types/node": "^20.10.0",
"autoprefixer": "^10.4.16",
"eslint-plugin-svelte": "^2.46.1",
"lucide-svelte": "^0.309.0",
"mdsvex": "^0.11.0",
"postcss": "^8.4.32",
"postcss-load-config": "^5.0.2",
"mdsvex": "^0.11.2",
"postcss": "^8.4.49",
"postcss-load-config": "^6.0.1",
"rehype-autolink-headings": "^7.1.0",
"rehype-slug": "^6.0.0",
"shiki": "^1.24.3",
"svelte": "^4.2.7",
"svelte-check": "^3.6.0",
"svelte-inview": "^4.0.4",
"svelte-markdown": "^0.4.1",
"svelte-reveal": "^1.1.0",
"svelte-youtube-embed": "^0.3.3",
"svelte-youtube-lite": "^0.6.2",
"tailwindcss": "^3.3.6",
"typescript": "^5.0.0",
"vite": "^5.0.3",
@@ -42,12 +51,30 @@
"date-fns": "^4.1.0",
"highlight.js": "^11.10.0",
"marked": "^15.0.1",
"nanoid": "4.0.2",
"rehype": "^13.0.2",
"rehype-autolink-headings": "^7.1.0",
"rehype-external-links": "^3.0.0",
"rehype-slug": "^6.0.0",
"rehype-unwrap-images": "^1.0.0",
"tailwind-merge": "^2.5.4",
"vfile-message": "^4.0.2",
"yaml": "^2.6.1",
"youtube-transcript": "^1.2.1"
},
"pnpm": {
"overrides": {
"tunnel-agent@<0.6.0": ">=0.6.0",
"qs@<6.0.4": ">=6.0.4",
"qs@<1.0.0": ">=1.0.0",
"hawk@<3.1.3": ">=3.1.3",
"http-signature@<0.10.0": ">=0.10.0",
"request@>=2.2.6 <2.47.0": ">=2.68.0",
"mime@<1.4.1": ">=1.4.1",
"hoek@<4.2.1": ">=4.2.1",
"hawk@<9.0.1": ">=9.0.1",
"qs@<6.2.4": ">=6.2.4",
"cookie@<0.7.0": ">=0.7.0",
"tough-cookie@<4.1.3": ">=4.1.3",
"nanoid@<3.3.8": ">=3.3.8"
}
}
}

1338
web/pnpm-lock.yaml generated

File diff suppressed because it is too large

View File

@@ -1,22 +1,22 @@
import svelte from 'rollup-plugin-svelte';
import resolve from '@rollup/plugin-node-resolve';
export default {
input: 'src/+page.js',
output: {
file: 'public/bundle.js',
format: 'iife',
name: 'app'
},
plugins: [
svelte({
// svelte options
extensions: [".svelte", ".svx", ".md"],
preprocess: mdsvex()
}),
resolve({
browser: true,
dedupe: ['svelte']
})
]
};
//import svelte from 'rollup-plugin-svelte';
//import resolve from '@rollup/plugin-node-resolve';
//
//export default {
// input: 'src/+page.js',
// output: {
// file: 'public/bundle.js',
// format: 'iife',
// name: 'app'
// },
// plugins: [
// svelte({
// // svelte options
// extensions: [".svelte", ".svx", ".md"],
// preprocess: mdsvex()
// }),
// resolve({
// browser: true,
// dedupe: ['svelte']
// })
// ]
//};

2
web/src/app.d.ts vendored
View File

@@ -3,7 +3,7 @@
// and what to do when importing types
declare namespace App {
// interface Locals {}
interface PageData {}
// interface PageData {}
// interface Error {}
// interface Platform {}
}

View File

@@ -1,12 +1,10 @@
<!DOCTYPE html>
<html lang="en" class="dark">
<head>
<meta charset="utf-8" />
<link rel="icon" href="%sveltekit.assets%/favicon.png" />
<meta name="viewport" content="width=device-width" />
%sveltekit.head%
</head>
<body data-sveltekit-preload-data="hover" data-theme="my-custom-theme">
<div style="display: contents" class="h-full overflow-hidden">%sveltekit.body%</div>
</body>
</html>
<head>
<meta charset="utf-8" />
<link rel="icon" href="%sveltekit.assets%/favicon.png" />
<meta name="viewport" content="width=device-width" />%sveltekit.head%</head>
<body data-sveltekit-preload-data="hover" data-theme="rocket">
<div style="display: contents" class="h-full overflow-hidden">%sveltekit.body%</div>
</body>
</html>

View File

@@ -0,0 +1,15 @@
export function clickOutside(node: HTMLElement, handler: () => void) {
const handleClick = (event: MouseEvent) => {
if (node && !node.contains(event.target as Node) && !event.defaultPrevented) {
handler();
}
};
document.addEventListener('click', handleClick, true);
return {
destroy() {
document.removeEventListener('click', handleClick, true);
}
};
}

98
web/src/lib/api/base.ts Normal file
View File

@@ -0,0 +1,98 @@
import type { StorageEntity } from '$lib/interfaces/storage-interface';
interface APIErrorResponse {
error: string;
}
interface APIResponse<T> {
data?: T;
error?: string;
}
// Define and export the base api object
export const api = {
async fetch<T>(endpoint: string, options: RequestInit = {}): Promise<APIResponse<T>> {
const response = await fetch(`/api${endpoint}`, {
...options,
headers: {
'Content-Type': 'application/json',
...options.headers,
},
});
if (!response.ok) {
return { error: (await response.json() as APIErrorResponse).error || response.statusText };
}
return { data: await response.json() as T };
},
get: <T>(endpoint: string) => api.fetch<T>(endpoint),
post: <T>(endpoint: string, data: unknown) => api.fetch<T>(endpoint, { method: 'POST', body: JSON.stringify(data) }),
put: <T>(endpoint: string, data?: unknown) => api.fetch<T>(endpoint, { method: 'PUT', body: data ? JSON.stringify(data) : undefined }),
delete: <T>(endpoint: string) => api.fetch<T>(endpoint, { method: 'DELETE' }),
stream: async function* (endpoint: string, data: unknown): AsyncGenerator<string> {
const response = await fetch(`/api${endpoint}`, {
method: 'POST',
headers: { 'Content-Type': 'application/json' },
body: JSON.stringify(data),
});
if (!response.ok) {
throw new Error(`HTTP error! status: ${response.status}`);
}
const reader = response.body?.getReader();
if (!reader) throw new Error('Response body is null');
const decoder = new TextDecoder();
while (true) {
const { done, value } = await reader.read();
if (done) break;
// stream: true preserves multi-byte characters split across chunks
yield decoder.decode(value, { stream: true });
}
}
};
export function createStorageAPI<T extends StorageEntity>(entityType: string) {
return {
async get(name: string): Promise<T> {
const response = await api.fetch<T>(`/${entityType}/${name}`);
if (response.error) throw new Error(response.error);
return response.data as T;
},
async getNames(): Promise<string[]> {
const response = await api.fetch<string[]>(`/${entityType}/names`);
if (response.error) throw new Error(response.error);
return response.data as string[];
},
async delete(name: string): Promise<void> {
const response = await api.fetch(`/${entityType}/${name}`, { method: 'DELETE' });
if (response.error) throw new Error(response.error);
},
async exists(name: string): Promise<boolean> {
const response = await api.fetch<boolean>(`/${entityType}/exists/${name}`);
if (response.error) throw new Error(response.error);
return response.data as boolean;
},
async rename(oldName: string, newName: string): Promise<void> {
const response = await api.fetch(`/${entityType}/rename/${oldName}/${newName}`, { method: 'PUT' });
if (response.error) throw new Error(response.error);
},
async save(name: string, content: string | object): Promise<void> {
const response = await api.fetch(`/${entityType}/${name}`, {
method: 'POST',
body: JSON.stringify(content),
headers: { 'Content-Type': 'application/json' },
});
if (response.error) throw new Error(response.error);
},
};
}

27
web/src/lib/api/config.ts Normal file
View File

@@ -0,0 +1,27 @@
import type { ModelConfig } from '$lib/interfaces/model-interface';
import { api } from './base';
const DEFAULT_CONFIG: Omit<ModelConfig, 'model'> = {
temperature: 0.7,
top_p: 0.9,
frequency: 0.5,
presence: 0,
maxLength: 2000
};
export const configApi = {
async get(): Promise<ModelConfig> {
try {
const response = await api.fetch<ModelConfig>('/config');
if (!response.data) {
return { ...DEFAULT_CONFIG, model: '' };
}
return response.data;
} catch (error) {
console.error('Failed to fetch config:', error);
throw error;
}
}
};

View File

@@ -0,0 +1,13 @@
import { api } from './base';
import type { Context } from '$lib/interfaces/context-interface';
export const contextAPI = {
async getAvailable(): Promise<Context[]> {
const response = await api.fetch<Context[]>('/contexts/names');
return response.data || [];
}
}
// TODO: add context element somewhere in the UI
// Should the file upload functionality be used as the context element?
// Or should there be another area to upload a context file?

24
web/src/lib/api/models.ts Normal file
View File

@@ -0,0 +1,24 @@
import { api } from './base';
import type { VendorModel, ModelsResponse } from '$lib/interfaces/model-interface';
export const modelsApi = {
async getAvailable(): Promise<VendorModel[]> {
try {
const response = await api.fetch<ModelsResponse>('/models/names');
if (!response.data?.vendors) {
throw new Error('Invalid response format: missing vendors data');
}
return Object.entries(response.data.vendors).flatMap(([vendor, models]) =>
models.map(model => ({
name: model,
vendor
}))
);
} catch (error) {
console.error("Failed to fetch models:", error);
throw error;
}
},
};

View File

@@ -0,0 +1,114 @@
<script lang="ts">
import ChatInput from "./ChatInput.svelte";
import ChatMessages from "./ChatMessages.svelte";
import ModelConfig from "./ModelConfig.svelte";
import DropdownGroup from "./DropdownGroup.svelte";
import NoteDrawer from "$lib/components/ui/noteDrawer/NoteDrawer.svelte";
import { Button } from "$lib/components/ui/button";
import { Input } from "$lib/components/ui/input";
import { Label } from "$lib/components/ui/label";
import { Checkbox } from "$lib/components/ui/checkbox";
import Tooltip from "$lib/components/ui/tooltip/Tooltip.svelte";
import { Textarea } from "$lib/components/ui/textarea";
import { obsidianSettings } from "$lib/store/obsidian-store";
import { featureFlags } from "$lib/config/features";
import { getDrawerStore } from '@skeletonlabs/skeleton';
import { systemPrompt, selectedPatternName } from "$lib/store/pattern-store";
const drawerStore = getDrawerStore();
function openDrawer() {
drawerStore.open({});
}
$: showObsidian = $featureFlags.enableObsidianIntegration;
</script>
<div class="flex gap-0 p-2 w-full h-screen">
<!-- Left Column -->
<aside class="w-[50%] flex flex-col gap-2 pr-2">
<!-- Dropdowns Group -->
<div class="bg-background/5 p-2 rounded-lg">
<div class="rounded-lg bg-background/10">
<DropdownGroup />
</div>
</div>
<!-- Model Config -->
<div class="bg-background/5 p-2 rounded-lg">
<ModelConfig />
</div>
<!-- Message Input -->
<div class="h-[200px] bg-background/5 rounded-lg overflow-hidden">
<ChatInput />
</div>
<!-- System Instructions -->
<div class="flex-1 min-h-0 bg-background/5 p-2 rounded-lg">
<div class="h-full flex flex-col">
<Textarea
bind:value={$systemPrompt}
readonly={true}
placeholder="System instructions will appear here when you select a pattern..."
class="w-full flex-1 bg-primary-800/30 rounded-lg border-none whitespace-pre-wrap overflow-y-auto resize-none text-sm scrollbar-thin scrollbar-thumb-white/10 scrollbar-track-transparent hover:scrollbar-thumb-white/20"
/>
</div>
</div>
</aside>
<!-- Right Column -->
<div class="flex flex-col w-[50%] gap-2">
<!-- Header with Obsidian Settings -->
<div class="flex items-center justify-between px-2 py-1">
<div class="flex items-center gap-2">
{#if showObsidian}
<div class="flex items-center gap-2">
<div class="flex items-center gap-1">
<Checkbox
bind:checked={$obsidianSettings.saveToObsidian}
id="save-to-obsidian"
class="h-3 w-3"
/>
<Label for="save-to-obsidian" class="text-xs text-white/70">Save to Obsidian</Label>
</div>
{#if $obsidianSettings.saveToObsidian}
<Input
id="note-name"
bind:value={$obsidianSettings.noteName}
placeholder="Note name..."
class="h-6 text-xs w-48 bg-white/5 border-none focus:ring-1 ring-white/20"
/>
{/if}
</div>
{/if}
</div>
<Button variant="ghost" size="sm" class="h-6 px-2 text-xs opacity-70 hover:opacity-100" on:click={openDrawer}>
<Tooltip text="Take Notes" position="left">
<span>Take Notes</span>
</Tooltip>
</Button>
</div>
<!-- Chat Area -->
<div class="flex-1 flex flex-col min-h-0">
<!-- Chat History -->
<div class="flex-1 min-h-0 bg-background/5 rounded-lg overflow-hidden">
<ChatMessages />
</div>
</div>
</div>
</div>
<NoteDrawer />
<style>
.loading-message {
animation: flash 1.5s ease-in-out infinite;
}
@keyframes flash {
0% { opacity: 1; }
50% { opacity: 0.5; }
100% { opacity: 1; }
}
</style>

View File

@@ -0,0 +1,364 @@
<script lang="ts">
import { Button } from "$lib/components/ui/button";
import { Textarea } from "$lib/components/ui/textarea";
import { sendMessage, messageStore } from '$lib/store/chat-store';
import { systemPrompt, selectedPatternName } from '$lib/store/pattern-store';
import { getToastStore } from '@skeletonlabs/skeleton';
import { FileButton } from '@skeletonlabs/skeleton';
import { Paperclip, Send, FileCheck } from 'lucide-svelte';
import { onMount } from 'svelte';
import { get } from 'svelte/store';
import { getTranscript } from '$lib/services/transcriptService';
import { ChatService } from '$lib/services/ChatService';
// import { obsidianSettings } from '$lib/store/obsidian-store';
import { languageStore } from '$lib/store/language-store';
import { obsidianSettings, updateObsidianSettings } from '$lib/store/obsidian-store';
const chatService = new ChatService();
let userInput = "";
let isYouTubeURL = false;
const toastStore = getToastStore();
let files: FileList | undefined = undefined;
let uploadedFiles: string[] = [];
let fileContents: string[] = [];
let isProcessingFiles = false;
function detectYouTubeURL(input: string): boolean {
const youtubePattern = /(?:https?:\/\/)?(?:www\.)?(?:youtube\.com|youtu\.be)/i;
const isYoutube = youtubePattern.test(input);
if (isYoutube) {
console.log('YouTube URL detected:', input);
console.log('Current system prompt:', $systemPrompt?.length);
console.log('Selected pattern:', $selectedPatternName);
}
return isYoutube;
}
function handleInput(event: Event) {
console.log('\n=== Handle Input ===');
const target = event.target as HTMLTextAreaElement;
userInput = target.value;
const currentLanguage = get(languageStore);
const languageQualifiers = {
'--en': 'en',
'--fr': 'fr',
'--es': 'es',
'--de': 'de',
'--zh': 'zh',
'--ja': 'ja'
};
let detectedLang = '';
for (const [qualifier, lang] of Object.entries(languageQualifiers)) {
if (userInput.includes(qualifier)) {
detectedLang = lang;
languageStore.set(lang);
userInput = userInput.replace(new RegExp(`${qualifier}\\s*`), '');
break;
}
}
console.log('2. Language state:', {
previousLanguage: currentLanguage,
currentLanguage: get(languageStore),
detectedOverride: detectedLang,
inputAfterLangRemoval: userInput
});
isYouTubeURL = detectYouTubeURL(userInput);
console.log('3. URL detection:', {
isYouTube: isYouTubeURL,
pattern: $selectedPatternName,
systemPromptLength: $systemPrompt?.length
});
}
async function handleFileUpload(e: Event) {
if (!files || files.length === 0) return;
if (uploadedFiles.length >= 5 || (uploadedFiles.length + files.length) > 5) {
toastStore.trigger({
message: 'Maximum 5 files allowed',
background: 'variant-filled-error'
});
return;
}
isProcessingFiles = true;
try {
for (let i = 0; i < files.length && uploadedFiles.length < 5; i++) {
const file = files[i];
const content = await readFileContent(file);
fileContents.push(content);
uploadedFiles = [...uploadedFiles, file.name];
}
} catch (error) {
toastStore.trigger({
message: 'Error processing files: ' + (error as Error).message,
background: 'variant-filled-error'
});
} finally {
isProcessingFiles = false;
}
}
function readFileContent(file: File): Promise<string> {
return new Promise((resolve, reject) => {
const reader = new FileReader();
reader.onload = (e) => resolve(e.target?.result as string);
reader.onerror = (e) => reject(new Error('Failed to read file'));
reader.readAsText(file);
});
}
async function saveToObsidian(content: string) {
if (!$obsidianSettings.saveToObsidian) {
console.log('Obsidian saving is disabled');
return;
}
if (!$obsidianSettings.noteName) {
toastStore.trigger({
message: 'Please enter a note name in Obsidian settings',
background: 'variant-filled-error'
});
return;
}
if (!$selectedPatternName) {
toastStore.trigger({
message: 'No pattern selected',
background: 'variant-filled-error'
});
return;
}
if (!content) {
toastStore.trigger({
message: 'No content to save',
background: 'variant-filled-error'
});
return;
}
try {
const response = await fetch('/obsidian', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
},
body: JSON.stringify({
pattern: $selectedPatternName,
noteName: $obsidianSettings.noteName,
content
})
});
const responseData = await response.json();
if (!response.ok) {
throw new Error(responseData.error || 'Failed to save to Obsidian');
}
// Add this after successful save
updateObsidianSettings({
saveToObsidian: false, // Reset the save flag
noteName: '' // Clear the note name
});
toastStore.trigger({
message: responseData.message || `Saved to Obsidian: ${responseData.fileName}`,
background: 'variant-filled-success'
});
} catch (error) {
console.error('Failed to save to Obsidian:', error);
toastStore.trigger({
message: error instanceof Error ? error.message : 'Failed to save to Obsidian',
background: 'variant-filled-error'
});
}
}
async function processYouTubeURL(input: string) {
console.log('\n=== YouTube Flow Start ===');
const originalLanguage = get(languageStore);
try {
// Add processing message first
messageStore.update(messages => [...messages, {
role: 'system',
content: 'Processing YouTube video...',
format: 'loading'
}]);
// Get transcript but don't display it
const { transcript } = await getTranscript(input);
// Log system prompt BEFORE createChatRequest
console.log('System prompt BEFORE createChatRequest in YouTube flow:', $systemPrompt);
// Log system prompt BEFORE streamChat
console.log(`System prompt BEFORE streamChat in YouTube flow: ${$systemPrompt}`);
const stream = await chatService.streamChat(transcript, $systemPrompt);
await chatService.processStream(
stream,
(content, response) => {
messageStore.update(messages => {
const newMessages = [...messages];
// Replace the processing message with actual content
const lastMessage = newMessages[newMessages.length - 1];
if (lastMessage?.format === 'loading') {
newMessages.pop();
}
newMessages.push({
role: 'assistant',
content,
format: response?.format
});
return newMessages;
});
},
(error) => {
messageStore.update(messages =>
messages.filter(m => m.format !== 'loading')
);
throw error;
}
);
// Handle Obsidian saving if needed
if ($obsidianSettings.saveToObsidian) {
let lastContent = '';
messageStore.subscribe(messages => {
const lastMessage = messages[messages.length - 1];
if (lastMessage?.role === 'assistant') {
lastContent = lastMessage.content;
}
})();
if (lastContent) await saveToObsidian(lastContent);
}
userInput = "";
uploadedFiles = [];
fileContents = [];
} catch (error) {
console.error('Error processing YouTube URL:', error);
messageStore.update(messages =>
messages.filter(m => m.format !== 'loading')
);
throw error;
}
}
async function handleSubmit() {
if (!userInput.trim()) return;
try {
console.log('\n=== Submit Handler Start ===');
if (isYouTubeURL) {
console.log('2a. Starting YouTube flow');
await processYouTubeURL(userInput);
return;
}
const finalContent = fileContents.length > 0
? userInput + '\n\nFile Contents:\n' + fileContents.join('\n\n')
: userInput;
await sendMessage(finalContent);
userInput = "";
uploadedFiles = [];
fileContents = [];
} catch (error) {
console.error('Chat submission error:', error);
}
}
function handleKeydown(event: KeyboardEvent) {
if (event.key === 'Enter' && !event.shiftKey) {
event.preventDefault();
handleSubmit();
}
}
onMount(() => {
console.log('ChatInput mounted, current system prompt:', $systemPrompt);
});
</script>
<div class="h-full flex flex-col p-2">
<div class="relative flex-1 min-h-0 bg-primary-800/30 rounded-lg">
<Textarea
bind:value={userInput}
on:input={handleInput}
on:keydown={handleKeydown}
placeholder="Enter your message (YouTube URLs will be automatically processed)..."
class="w-full h-full resize-none bg-transparent border-none text-sm focus:ring-0 transition-colors p-3 pb-[48px]"
/>
<div class="absolute bottom-3 right-3 flex items-center gap-2">
<div class="flex items-center gap-2">
{#if uploadedFiles.length > 0}
<span class="text-xs text-white/70">
{uploadedFiles.length} file{uploadedFiles.length > 1 ? 's' : ''} attached
</span>
{/if}
<FileButton
name="file-upload"
button="btn-icon variant-ghost"
bind:files
on:change={handleFileUpload}
disabled={isProcessingFiles || uploadedFiles.length >= 5}
class="h-10 w-10 bg-primary-800/30 hover:bg-primary-800/50 rounded-full transition-colors"
>
{#if uploadedFiles.length > 0}
<FileCheck class="w-5 h-5" />
{:else}
<Paperclip class="w-5 h-5" />
{/if}
</FileButton>
<Button
type="button"
variant="ghost"
size="icon"
name="send"
on:click={handleSubmit}
disabled={isProcessingFiles || !userInput.trim()}
class="h-10 w-10 bg-primary-800/30 hover:bg-primary-800/50 rounded-full transition-colors disabled:opacity-30"
>
<Send class="w-5 h-5" />
</Button>
</div>
</div>
</div>
</div>
<style>
:global(textarea) {
scrollbar-width: thin;
scrollbar-color: rgba(255, 255, 255, 0.2) transparent;
}
:global(textarea::-webkit-scrollbar) {
width: 6px;
}
:global(textarea::-webkit-scrollbar-track) {
background: transparent;
}
:global(textarea::-webkit-scrollbar-thumb) {
background-color: rgba(255, 255, 255, 0.2);
border-radius: 3px;
}
:global(textarea::-webkit-scrollbar-thumb:hover) {
background-color: rgba(255, 255, 255, 0.3);
}
:global(textarea::selection) {
background-color: rgba(255, 255, 255, 0.1);
}
</style>

View File

@@ -0,0 +1,249 @@
<script lang="ts">
import { chatState, errorStore, streamingStore } from '$lib/store/chat-store';
import { afterUpdate, onMount } from 'svelte';
import { toastStore } from '$lib/store/toast-store';
import { marked } from 'marked';
import SessionManager from './SessionManager.svelte';
import { fade, slide } from 'svelte/transition';
import { ArrowDown } from 'lucide-svelte';
import Modal from '$lib/components/ui/modal/Modal.svelte';
import PatternList from '$lib/components/patterns/PatternList.svelte';
import type { Message } from '$lib/interfaces/chat-interface';
import { get } from 'svelte/store';
import { selectedPatternName } from '$lib/store/pattern-store';
let showPatternModal = false;
let messagesContainer: HTMLDivElement | null = null;
let showScrollButton = false;
let isUserMessage = false;
function scrollToBottom() {
if (messagesContainer) {
messagesContainer.scrollTo({ top: messagesContainer.scrollHeight, behavior: 'smooth' });
}
}
function handleScroll() {
if (!messagesContainer) return;
const { scrollTop, scrollHeight, clientHeight } = messagesContainer;
showScrollButton = scrollHeight - scrollTop - clientHeight > 100;
}
// Watch for changes in messages
$: if ($chatState.messages.length > 0) {
const lastMessage = $chatState.messages[$chatState.messages.length - 1];
isUserMessage = lastMessage.role === 'user';
if (isUserMessage) {
// Only auto-scroll on user messages
setTimeout(scrollToBottom, 100);
}
}
onMount(() => {
if (messagesContainer) {
messagesContainer.addEventListener('scroll', handleScroll);
return () => {
if (messagesContainer) {
messagesContainer.removeEventListener('scroll', handleScroll);
}
};
}
});
// Configure marked to be synchronous
const renderer = new marked.Renderer();
marked.setOptions({
gfm: true,
breaks: true,
renderer,
async: false
});
// New shouldRenderAsMarkdown function
function shouldRenderAsMarkdown(message: Message): boolean {
const pattern = get(selectedPatternName);
if (pattern && message.role === 'assistant') {
return message.format !== 'mermaid';
}
return message.role === 'assistant' && message.format !== 'plain';
}
// Keep the original renderContent function
function renderContent(message: Message): string {
const content = message.content.replace(/\\n/g, '\n');
if (shouldRenderAsMarkdown(message)) {
try {
return marked.parse(content, { async: false }) as string;
} catch (error) {
console.error('Error rendering markdown:', error);
return content;
}
}
return content;
}
</script>
<div class="bg-primary-800/30 rounded-lg flex flex-col h-full shadow-lg">
<div class="flex justify-between items-center p-3 flex-none border-b border-white/5">
<div>
<span class="text-xs text-white/70 font-medium">Chat History</span>
</div>
<SessionManager />
</div>
<Modal
show={showPatternModal}
on:close={() => showPatternModal = false}
>
<PatternList on:close={() => showPatternModal = false} />
</Modal>
{#if $errorStore}
<div class="error-message" transition:slide>
<div class="bg-red-100 border-l-4 border-red-500 text-red-700 p-4 mb-4" role="alert">
<p>{$errorStore}</p>
</div>
</div>
{/if}
<div
class="messages-container p-3 flex-1 overflow-y-auto max-h-dvh relative"
bind:this={messagesContainer}
>
<div class="messages-content flex flex-col gap-3">
{#each $chatState.messages as message}
<div
class="message-item {message.role === 'system' ? 'w-full bg-blue-900/20' : message.role === 'assistant' ? 'bg-primary/5 rounded-lg p-3' : 'ml-auto'}"
transition:fade
class:loading-message={message.format === 'loading'}
>
<div class="message-header flex items-center gap-2 mb-1 {message.role === 'assistant' || message.role === 'system' ? '' : 'justify-end'}">
<span class="text-xs text-muted-foreground rounded-lg p-1 variant-glass-secondary font-bold uppercase">
{#if message.role === 'system'}
SYSTEM
{:else if message.role === 'assistant'}
AI
{:else}
You
{/if}
</span>
{#if message.role === 'assistant' && $streamingStore}
<span class="loading-indicator flex gap-1">
<span class="dot animate-bounce">.</span>
<span class="dot animate-bounce delay-100">.</span>
<span class="dot animate-bounce delay-200">.</span>
</span>
{/if}
</div>
{#if message.role === 'system'}
<div class="text-blue-300 text-sm font-semibold">
{message.content}
</div>
{:else if message.role === 'assistant'}
<div class="{shouldRenderAsMarkdown(message) ? 'prose prose-slate dark:prose-invert text-inherit prose-headings:text-inherit prose-pre:bg-primary/10 prose-pre:text-inherit' : 'whitespace-pre-wrap'} text-sm max-w-none">
{@html renderContent(message)}
</div>
{:else}
<div class="whitespace-pre-wrap text-sm">
{message.content}
</div>
{/if}
</div>
{/each}
</div>
{#if showScrollButton}
<button
class="absolute bottom-4 right-4 bg-primary/20 hover:bg-primary/30 rounded-full p-2 transition-opacity"
on:click={scrollToBottom}
transition:fade
>
<ArrowDown class="w-4 h-4" />
</button>
{/if}
</div>
</div>
<style>
:global(.loading-message) {
animation: flash 1.5s ease-in-out infinite;
}
@keyframes flash {
0% { opacity: 1; }
50% { opacity: 0.5; }
100% { opacity: 1; }
}
.messages-container {
flex: 1;
overflow-y: auto;
scrollbar-width: thin;
-ms-overflow-style: thin;
}
.messages-content {
display: flex;
flex-direction: column;
gap: 0.75rem;
}
.message-header {
display: flex;
gap: 0.5rem;
}
.message-item {
position: relative;
}
.loading-indicator {
display: inline-flex;
gap: 2px;
}
.dot {
animation: blink 1.4s infinite;
opacity: 0;
}
.dot:nth-child(2) {
animation-delay: 0.2s;
}
.dot:nth-child(3) {
animation-delay: 0.4s;
}
@keyframes blink {
0%, 100% { opacity: 0; }
50% { opacity: 1; }
}
:global(.prose pre) {
background-color: rgb(40, 44, 52);
color: rgb(171, 178, 191);
padding: 1rem;
border-radius: 0.375rem;
margin: 1rem 0;
}
:global(.prose code) {
color: rgb(171, 178, 191);
background-color: rgba(40, 44, 52, 0.1);
padding: 0.2em 0.4em;
border-radius: 0.25rem;
}
</style>

View File

@@ -0,0 +1,35 @@
<script lang="ts">
import Patterns from "./Patterns.svelte";
import Models from "./Models.svelte";
import { Select } from "$lib/components/ui/select";
import { languageStore } from '$lib/store/language-store';
const languages = [
{ code: '', name: 'Default Language' },
{ code: 'en', name: 'English' },
{ code: 'fr', name: 'French' },
{ code: 'es', name: 'Spanish' },
{ code: 'de', name: 'German' },
{ code: 'zh', name: 'Chinese' },
{ code: 'ja', name: 'Japanese' }
];
</script>
<div class="flex flex-col gap-3">
<div class="w-[50%]">
<Patterns />
</div>
<div class="w-[50%]">
<Models />
</div>
<div class="w-[50%]">
<Select
bind:value={$languageStore}
class="bg-primary-800/30 border-none hover:bg-primary-800/40 transition-colors"
>
{#each languages as lang}
<option value={lang.code}>{lang.name}</option>
{/each}
</Select>
</div>
</div>

View File

@@ -0,0 +1,94 @@
<script lang="ts">
export {};
import { Label } from "$lib/components/ui/label";
import { Slider } from "$lib/components/ui/slider";
import { modelConfig } from "$lib/store/model-store";
import { slide } from 'svelte/transition';
import { cubicOut } from 'svelte/easing';
import { browser } from '$app/environment';
import { clickOutside } from '$lib/actions/clickOutside';
import Tooltip from "$lib/components/ui/tooltip/Tooltip.svelte";
// Load expanded state from localStorage
const STORAGE_KEY = 'modelConfigExpanded';
let isExpanded = false;
if (browser) {
const stored = localStorage.getItem(STORAGE_KEY);
isExpanded = stored ? JSON.parse(stored) : false;
}
// Save expanded state
function toggleExpanded() {
isExpanded = !isExpanded;
saveState();
}
function saveState() {
if (browser) {
localStorage.setItem(STORAGE_KEY, JSON.stringify(isExpanded));
}
}
function handleClickOutside() {
if (isExpanded) {
isExpanded = false;
saveState();
}
}
const settings = [
{ key: 'maxLength', label: 'Maximum Length', min: 1, max: 4000, step: 1, tooltip: "Maximum number of tokens in the response" },
{ key: 'temperature', label: 'Temperature', min: 0, max: 2, step: 0.1, tooltip: "Higher values make output more random, lower values more focused" },
{ key: 'top_p', label: 'Top P', min: 0, max: 1, step: 0.01, tooltip: "Controls diversity via nucleus sampling" },
{ key: 'frequency', label: 'Frequency Penalty', min: 0, max: 1, step: 0.01, tooltip: "Reduces repetition of the same words" },
{ key: 'presence', label: 'Presence Penalty', min: 0, max: 1, step: 0.01, tooltip: "Reduces repetition of similar topics" }
] as const;
</script>
<div class="w-full" use:clickOutside={handleClickOutside}>
<button
class="w-full flex items-center py-2 px-2 hover:text-white/90 transition-colors rounded-t"
on:click={toggleExpanded}
>
<span class="text-sm font-semibold">Model Configuration</span>
<span class="transform transition-transform duration-200 opacity-70 ml-1 text-xs" class:rotate-180={isExpanded}>
</span>
</button>
{#if isExpanded}
<div
class="pt-2 px-2 space-y-3"
transition:slide={{
duration: 200,
easing: cubicOut,
}}
>
{#each settings as setting}
<div class="group">
<div class="flex justify-between items-center mb-0.5">
<Tooltip text={setting.tooltip} position="right">
<Label class="text-[10px] text-white/70 cursor-help group-hover:text-white/90 transition-colors">{setting.label}</Label>
</Tooltip>
<span class="text-[10px] font-mono text-white/50 group-hover:text-white/70 transition-colors">
{typeof $modelConfig[setting.key] === 'number' ? $modelConfig[setting.key].toFixed(2) : $modelConfig[setting.key]}
</span>
</div>
<Slider
bind:value={$modelConfig[setting.key]}
min={setting.min}
max={setting.max}
step={setting.step}
class="h-3 group-hover:opacity-90 transition-opacity"
/>
</div>
{/each}
</div>
{/if}
</div>
<style>
:global(.slider) {
height: 0.75rem !important;
}
</style>

View File

@@ -0,0 +1,21 @@
<script lang="ts">
import { onMount } from 'svelte';
import { Select } from "$lib/components/ui/select";
import { modelConfig, availableModels, loadAvailableModels } from "$lib/store/model-store";
onMount(async () => {
await loadAvailableModels();
});
</script>
<div class="min-w-0">
<Select
bind:value={$modelConfig.model}
class="bg-primary-800/30 border-none hover:bg-primary-800/40 transition-colors"
>
<option value="">Default Model</option>
{#each $availableModels as model (model.name)}
<option value={model.name}>{model.vendor} - {model.name}</option>
{/each}
</Select>
</div>

View File

@@ -0,0 +1,53 @@
<script lang="ts">
import { onMount } from 'svelte';
import { Select } from "$lib/components/ui/select";
import { patterns, patternAPI, systemPrompt, selectedPatternName } from "$lib/store/pattern-store";
import { get } from 'svelte/store';
let selectedPreset = $selectedPatternName || "";
// Subscribe to selectedPatternName changes
selectedPatternName.subscribe(value => {
if (value && value !== selectedPreset) {
console.log('Pattern selected from modal:', value);
selectedPreset = value;
}
});
// Watch selectedPreset changes
$: if (selectedPreset) {
console.log('Pattern selected from dropdown:', selectedPreset);
try {
patternAPI.selectPattern(selectedPreset);
// Verify the selection
const currentSystemPrompt = get(systemPrompt);
const currentPattern = get(selectedPatternName);
console.log('After dropdown selection - Pattern:', currentPattern);
console.log('After dropdown selection - System Prompt length:', currentSystemPrompt?.length);
if (!currentPattern || !currentSystemPrompt) {
console.error('Pattern selection verification failed:');
console.error('- Selected Pattern:', currentPattern);
console.error('- System Prompt:', currentSystemPrompt);
}
} catch (error) {
console.error('Error in pattern selection:', error);
}
}
onMount(async () => {
await patternAPI.loadPatterns();
});
</script>
<div class="min-w-0">
<Select
bind:value={selectedPreset}
class="bg-primary-800/30 border-none hover:bg-primary-800/40 transition-colors"
>
<option value="">Load a pattern...</option>
{#each $patterns as pattern}
<option value={pattern.Name}>{pattern.Name}</option>
{/each}
</Select>
</div>

View File

@@ -0,0 +1,68 @@
<script lang="ts">
import { onMount } from 'svelte';
import { RotateCcw, Trash2, Save, Copy, File as FileIcon } from 'lucide-svelte';
import { sessions, sessionAPI } from '$lib/store/session-store';
import { chatState, clearMessages, revertLastMessage, currentSession, messageStore } from '$lib/store/chat-store';
import { Button } from '$lib/components/ui/button';
import { toastService } from '$lib/services/toast-service';
let sessionsList: string[] = [];
$: sessionName = $currentSession;
$: if ($sessions) {
sessionsList = $sessions.map(s => s.Name);
}
onMount(async () => {
try {
await sessionAPI.loadSessions();
} catch (error) {
console.error('Failed to load sessions:', error);
}
});
async function saveSession() {
try {
await sessionAPI.exportToFile($chatState.messages);
} catch (error) {
console.error('Failed to save session:', error);
}
}
async function loadSession() {
try {
const messages = await sessionAPI.importFromFile();
messageStore.set(messages);
} catch (error) {
console.error('Failed to load session:', error);
}
}
async function copyToClipboard() {
try {
await navigator.clipboard.writeText($chatState.messages.map(m => m.content).join('\n'));
toastService.success('Chat copied to clipboard!');
} catch (err) {
toastService.error('Failed to copy transcript');
}
}
</script>
<div class="p-1 m-1 mr-2">
<div class="flex gap-2">
<Button variant="outline" size="icon" aria-label="Revert Last Message" on:click={revertLastMessage}>
<RotateCcw class="h-4 w-4" />
</Button>
<Button variant="outline" size="icon" aria-label="Clear Chat" on:click={clearMessages}>
<Trash2 class="h-4 w-4" />
</Button>
<Button variant="outline" size="icon" aria-label="Copy Chat" on:click={copyToClipboard}>
<Copy class="h-4 w-4" />
</Button>
<Button variant="outline" size="icon" aria-label="Load Session" on:click={loadSession}>
<FileIcon class="h-4 w-4" />
</Button>
<Button variant="outline" size="icon" aria-label="Save Session" on:click={saveSession}>
<Save class="h-4 w-4" />
</Button>
</div>
</div>

View File

@@ -0,0 +1,135 @@
<script lang='ts'>
import { getToastStore } from '@skeletonlabs/skeleton';
import { Button } from "$lib/components/ui/button";
import Input from '$lib/components/ui/input/Input.svelte';
import { Toast } from '@skeletonlabs/skeleton';
let url = '';
let transcript = '';
let loading = false;
let error = '';
let title = '';
const toastStore = getToastStore();
async function fetchTranscript() {
function isValidYouTubeUrl(url: string) {
const pattern = /^(https?:\/\/)?(www\.)?(youtube\.com|youtu\.be)\/.+$/;
return pattern.test(url);
}
if (!isValidYouTubeUrl(url)) {
error = 'Please enter a valid YouTube URL';
toastStore.trigger({
message: error,
background: 'variant-filled-error'
});
return;
}
loading = true;
error = '';
try {
const response = await fetch('/chat', {
method: 'POST',
headers: {
'Content-Type': 'application/json',
'Accept': 'application/json'
},
body: JSON.stringify({ url })
});
if (!response.ok) {
const errorData = await response.json();
throw new Error(errorData.error || 'Failed to fetch transcript');
}
const data = await response.json();
console.log('Parsed response data:', data);
transcript = data.transcript;
title = data.title;
} catch (err) {
// Without this catch the rejection escaped fetchTranscript unhandled
// and the error state was never surfaced to the user.
error = err instanceof Error ? err.message : 'Failed to fetch transcript';
toastStore.trigger({
message: error,
background: 'variant-filled-error'
});
} finally {
loading = false;
}
}
async function copyToClipboard() {
try {
await navigator.clipboard.writeText(transcript);
toastStore.trigger({
message: 'Transcript copied to clipboard!',
background: 'variant-filled-success'
});
} catch (err) {
toastStore.trigger({
message: 'Failed to copy transcript',
background: 'variant-filled-error'
});
}
}
</script>
<div class="flex gap-2">
<Input
bind:value={url}
placeholder="YouTube URL"
class="flex-1 rounded-full border bg-background px-4"
disabled={loading}
/>
<Button
variant="secondary"
on:click={fetchTranscript}
disabled={loading || !url}
>
{#if loading}
<div class="spinner-border" />
{:else}
Get
{/if}
</Button>
</div>
{#if error}
<div class="bg-destructive/15 text-destructive rounded-lg p-2">{error}</div>
{/if}
{#if transcript}
<Toast position="l" />
<div class="space-y-4 border rounded-lg p-4 bg-muted/50 h-96">
<div class="flex justify-between items-center">
<h3 class="text-xs font-semibold">{title || 'Transcript'}</h3>
<Button
variant="outline"
size="sm"
on:click={copyToClipboard}
>
Copy to Clipboard
</Button>
</div>
<textarea
class="w-full text-xs rounded-md border bg-background px-3 py-2 resize-none h-72"
readonly
value={transcript}
></textarea>
</div>
{/if}
<style>
.spinner-border {
width: 1rem;
height: 1rem;
border: 2px solid currentColor;
border-right-color: transparent;
border-radius: 50%;
animation: spin 0.75s linear infinite;
}
@keyframes spin {
from { transform: rotate(0deg); }
to { transform: rotate(360deg); }
}
</style>
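The component above posts the URL to /chat and expects { title, transcript } back. A minimal sketch of a matching SvelteKit route handler, with the transcript lookup stubbed out (hypothetical; the actual server route is not shown in this diff):

// Hypothetical sketch of a /chat +server.ts matching the fetch above.
import { json } from '@sveltejs/kit';
import type { RequestHandler } from './$types';

// Stub: the real transcript lookup (assumed helper, not part of this diff).
async function fetchYouTubeTranscript(url: string): Promise<{ title: string; transcript: string }> {
  throw new Error('not implemented');
}

export const POST: RequestHandler = async ({ request }) => {
  const { url } = await request.json();
  if (!url) {
    return json({ error: 'Missing url' }, { status: 400 });
  }
  try {
    const { title, transcript } = await fetchYouTubeTranscript(url);
    return json({ title, transcript });
  } catch {
    // The client reads errorData.error when response.ok is false.
    return json({ error: 'Failed to fetch transcript' }, { status: 502 });
  }
};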

@@ -0,0 +1,26 @@
<script lang="ts">
import { Contact } from 'lucide-svelte';
</script>
<div class="form-control w-full m-auto p-4 rounded-lg bg-gradient-to-br variant-gradient-success-warning shadow-lg text-current" title="contact form">
<h2 class="font-bold pl-2">We'd love to hear from you</h2>
<p class="font-bold pl-2">Email</p>
<div class="input-group input-group-divider grid-cols-[1fr_auto]">
<input type="text" placeholder="Enter an email address where you can be reached..." />
</div>
<p class="font-bold pl-2">Website</p>
<div class="input-group input-group-divider grid-cols-[auto_1fr_auto]">
<div class="input-group-shim">https://</div>
<input type="text" placeholder="www.example.com" />
</div>
<p class="font-bold pl-2">Contact Information</p>
<div class="input-group input-group-divider grid-cols-[1fr_auto]">
<input type="text" placeholder="Enter a other contact information here..." />
</div>
<label class="label">
<span class="font-bold pl-2">Message</span>
<textarea class="textarea" rows="4" placeholder="Enter your message ..." />
</label>
<a href="/" title=""><button class="button variant-filled-secondary rounded-lg p-2"><Contact /></button></a>
</div>

@@ -6,11 +6,11 @@
<footer class="border-t bg-background/95 backdrop-blur supports-[backdrop-filter]:bg-background/60">
<div class="container flex h-14 items-center justify-between px-4">
<p class="text-sm text-muted-foreground">
- Built in {year} by @johnconnor-sec
+ Built in {year} by @Fabric <!-- Feel free to put your name here -->
</p>
<nav class="flex items-center gap-4 ">
<BuyMeCoffee url="https://www.buymeacoffee.com/johnconnor.sec" />
<BuyMeCoffee url="https://www.buymeacoffee.com/" /> <!-- And here -->
</nav>
</div>
</footer>

@@ -0,0 +1,168 @@
<script lang="ts">
import { page } from '$app/stores';
import { Sun, Moon, Menu, X, Github, FileText } from 'lucide-svelte';
import { Avatar } from '@skeletonlabs/skeleton';
import { fade } from 'svelte/transition';
import { theme, cycleTheme, initTheme } from '$lib/store/theme-store';
import { onMount } from 'svelte';
import Modal from '$lib/components/ui/modal/Modal.svelte';
import PatternList from '$lib/components/patterns/PatternList.svelte';
import HelpModal from '$lib/components/ui/help/HelpModal.svelte';
import { selectedPatternName } from '$lib/store/pattern-store';
let isMenuOpen = false;
let showPatternModal = false;
let showHelpModal = false;
function goToGithub() {
window.open('https://github.com/danielmiessler/fabric', '_blank');
}
function toggleMenu() {
isMenuOpen = !isMenuOpen;
}
$: currentPath = $page.url.pathname;
$: isDarkMode = $theme === 'my-custom-theme';
const navItems = [
{ href: '/', label: 'Home' },
{ href: '/posts', label: 'Posts' },
// { href: '/tags', label: 'Tags' },
{ href: '/chat', label: 'Chat' },
//{ href: '/obsidian', label: 'Obsidian' },
{ href: '/contact', label: 'Contact' },
{ href: '/about', label: 'About' },
];
onMount(() => {
initTheme();
});
</script>
<header class="fixed top-0 z-50 w-full border-b bg-background/95 backdrop-blur supports-[backdrop-filter]:bg-background/60">
<div class="container flex h-16 items-center justify-between px-4">
<div class="flex items-center gap-4">
<Avatar
src="/fabric-logo.png"
width="w-10"
rounded="rounded-full"
class="border-2 border-primary/20"
/>
<a href="/" class="flex items-center">
<span class="text-lg font-semibold">fabric</span>
</a>
</div>
<!-- Desktop Navigation -->
<nav class="hidden flex-1 px-8 md:flex">
<ul class="flex items-center space-x-8">
{#each navItems as { href, label }}
<li>
<a
{href}
class="text-sm font-medium transition-colors hover:text-primary {currentPath === href ? 'text-primary' : 'text-foreground/60'}"
>
{label}
</a>
</li>
{/each}
</ul>
</nav>
<div class="flex items-center gap-2">
<button name="pattern-description"
on:click={() => showPatternModal = true}
class="inline-flex h-9 items-center justify-center rounded-full border bg-background px-3 text-sm font-medium transition-colors hover:bg-accent hover:text-accent-foreground gap-2"
aria-label="Pattern Description"
>
<FileText class="h-4 w-4" />
<span>Pattern Description</span>
</button>
<button name="github"
on:click={goToGithub}
class="inline-flex h-9 w-9 items-center justify-center rounded-full border bg-background text-sm font-medium transition-colors hover:bg-accent hover:text-accent-foreground"
aria-label="GitHub"
>
<Github class="h-4 w-4" />
<span class="sr-only">GitHub</span>
</button>
<button name="toggle-theme"
on:click={cycleTheme}
class="inline-flex h-9 w-9 items-center justify-center rounded-full border bg-background text-sm font-medium transition-colors hover:bg-accent hover:text-accent-foreground"
aria-label="Toggle theme"
>
{#if isDarkMode}
<Sun class="h-4 w-4" />
{:else}
<Moon class="h-4 w-4" />
{/if}
<span class="sr-only">Toggle theme</span>
</button>
<button name="help"
on:click={() => showHelpModal = true}
class="inline-flex h-9 w-9 items-center justify-center rounded-full border bg-background text-sm font-medium transition-colors hover:bg-accent hover:text-accent-foreground ml-3"
aria-label="Help"
>
<span class="text-xl font-bold text-white/90 hover:text-white">?</span>
<span class="sr-only">Help</span>
</button>
<!-- Mobile Menu Button -->
<button name="toggle-menu"
class="inline-flex h-9 w-9 items-center justify-center rounded-lg border bg-background text-sm font-medium transition-colors hover:bg-accent hover:text-accent-foreground md:hidden"
on:click={toggleMenu}
aria-expanded={isMenuOpen}
aria-label="Toggle menu"
>
{#if isMenuOpen}
<X class="h-4 w-4" />
{:else}
<Menu class="h-4 w-4" />
{/if}
</button>
</div>
</div>
<!-- Mobile Navigation -->
{#if isMenuOpen}
<div class="container md:hidden" transition:fade={{ duration: 200 }}>
<nav class="flex flex-col space-y-4 p-4">
{#each navItems as { href, label }}
<a
{href}
class="text-base font-medium transition-colors hover:text-primary {currentPath === href ? 'text-primary' : 'text-foreground/60'}"
on:click={() => (isMenuOpen = false)}
>
{label}
</a>
{/each}
</nav>
</div>
{/if}
</header>
<Modal
show={showPatternModal}
on:close={() => showPatternModal = false}
>
<PatternList
on:close={() => showPatternModal = false}
on:select={(e) => {
selectedPatternName.set(e.detail);
showPatternModal = false;
}}
/>
</Modal>
<Modal
show={showHelpModal}
on:close={() => showHelpModal = false}
>
<HelpModal
on:close={() => showHelpModal = false}
/>
</Modal>
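cycleTheme and initTheme come from $lib/store/theme-store; the header only checks for the 'my-custom-theme' name. A plausible minimal store, assuming a second 'skeleton' theme and localStorage persistence (both assumptions):

// Hypothetical sketch of $lib/store/theme-store used by the header.
import { get, writable } from 'svelte/store';

const THEMES = ['my-custom-theme', 'skeleton']; // second theme name is an assumption
export const theme = writable<string>(THEMES[0]);

export function initTheme(): void {
  // Restore the last choice; fall back to the first theme.
  const saved = localStorage.getItem('theme');
  setTheme(saved && THEMES.includes(saved) ? saved : THEMES[0]);
}

export function cycleTheme(): void {
  const next = THEMES[(THEMES.indexOf(get(theme)) + 1) % THEMES.length];
  setTheme(next);
}

function setTheme(name: string): void {
  theme.set(name);
  localStorage.setItem('theme', name);
  // Skeleton applies themes via the data-theme attribute on <body>.
  document.body.dataset.theme = name;
}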

@@ -0,0 +1,209 @@
<script lang="ts">
import { onMount, createEventDispatcher } from 'svelte';
import { get } from 'svelte/store';
import type { Pattern } from '$lib/interfaces/pattern-interface';
import { favorites } from '$lib/store/favorites-store';
import { patterns, patternAPI, systemPrompt, selectedPatternName } from '$lib/store/pattern-store';
import { Input } from "$lib/components/ui/input";
import TagFilterPanel from './TagFilterPanel.svelte';
let tagFilterRef: TagFilterPanel;
const dispatch = createEventDispatcher<{
close: void;
select: string;
tagsChanged: string[]; // emitted when the active tag filter changes
}>();
let patternsContainer: HTMLDivElement;
let sortBy: 'alphabetical' | 'favorites' = 'alphabetical';
let searchText = ""; // For pattern filtering
let selectedTags: string[] = [];
// First filter patterns by both text and tags
$: filteredPatterns = $patterns
.filter((p: Pattern) =>
p.Name.toLowerCase().includes(searchText.toLowerCase())
)
.filter((p: Pattern) =>
selectedTags.length === 0 ||
(p.tags && selectedTags.every(tag => p.tags.includes(tag)))
);
// Then sort the filtered patterns
$: sortedPatterns = sortBy === 'alphabetical'
? [...filteredPatterns].sort((a: Pattern, b: Pattern) => a.Name.localeCompare(b.Name))
: [
...filteredPatterns.filter((p: Pattern) => $favorites.includes(p.Name)).sort((a: Pattern, b: Pattern) => a.Name.localeCompare(b.Name)),
...filteredPatterns.filter((p: Pattern) => !$favorites.includes(p.Name)).sort((a: Pattern, b: Pattern) => a.Name.localeCompare(b.Name))
];
function handleTagFilter(event: CustomEvent<string[]>) {
selectedTags = event.detail;
}
onMount(async () => {
try {
await patternAPI.loadPatterns();
} catch (error) {
console.error('Error loading patterns:', error);
}
});
function toggleFavorite(name: string) {
favorites.toggleFavorite(name);
}
</script>
<div class="bg-primary-800 rounded-lg flex flex-col h-[85vh] w-[600px] shadow-lg relative">
<div class="flex flex-col border-b border-primary-700/30">
<div class="flex justify-between items-center p-4">
<b class="text-lg text-muted-foreground font-bold">Pattern Descriptions</b>
<button
on:click={() => dispatch('close')}
class="text-muted-foreground hover:text-primary-300 transition-colors"
aria-label="Close"
>
✕
</button>
</div>
<div class="px-4 pb-4 flex items-center justify-between">
<div class="flex gap-4">
<label class="flex items-center gap-2 text-sm text-muted-foreground">
<input
type="radio"
bind:group={sortBy}
value="alphabetical"
class="radio"
>
Alphabetical
</label>
<label class="flex items-center gap-2 text-sm text-muted-foreground">
<input
type="radio"
bind:group={sortBy}
value="favorites"
class="radio"
>
Favorites First
</label>
</div>
<div class="w-64 mr-4">
<Input
bind:value={searchText}
placeholder="Search patterns..."
class="text-emerald-900"
/>
</div>
</div>
<!-- New tag display section -->
<div class="px-4 pb-2">
<div class="text-sm text-white/70 bg-primary-700/30 rounded-md p-2 flex justify-between items-center">
<div>Tags: {selectedTags.length ? selectedTags.join(', ') : 'none'}</div>
<button
class="px-2 py-1 text-xs text-white/70 bg-primary-600/30 rounded hover:bg-primary-600/50 transition-colors"
on:click={() => {
selectedTags = [];
dispatch('tagsChanged', selectedTags);
}}
>
reset
</button>
</div>
</div>
</div>
<TagFilterPanel
patterns={$patterns}
on:tagsChanged={handleTagFilter}
bind:this={tagFilterRef}
/>
<div
class="patterns-container p-4 flex-1 overflow-y-auto"
bind:this={patternsContainer}
>
<div class="patterns-list space-y-2">
{#each sortedPatterns as pattern}
<div class="pattern-item bg-primary/10 rounded-lg p-3">
<div class="flex justify-between items-start gap-4 mb-2">
<button
class="text-xl font-bold text-primary-300 hover:text-primary-100 cursor-pointer transition-colors text-left w-full"
on:click={() => {
console.log('Selecting pattern:', pattern.Name);
patternAPI.selectPattern(pattern.Name);
searchText = "";
tagFilterRef.reset();
dispatch('select', pattern.Name);
dispatch('close');
}}
on:keydown={(e) => {
if (e.key === 'Enter' || e.key === ' ') {
e.preventDefault();
e.currentTarget.click();
}
}}
>
{pattern.Name}
</button>
<button
class="text-muted-foreground hover:text-primary-300 transition-colors"
on:click={() => toggleFavorite(pattern.Name)}
aria-label={$favorites.includes(pattern.Name) ? 'Remove from favorites' : 'Add to favorites'}
>
{#if $favorites.includes(pattern.Name)}
★
{:else}
☆
{/if}
</button>
</div>
<p class="text-sm text-muted-foreground break-words leading-relaxed">{pattern.Description}</p>
</div>
{/each}
</div>
</div>
</div>
<style>
.patterns-container {
flex: 1;
overflow-y: auto;
scrollbar-width: thin; /* 'thin' is not a valid -ms-overflow-style value, so that legacy rule was dropped */
}
.patterns-list {
display: flex;
flex-direction: column;
width: 100%;
max-width: 560px;
margin: 0 auto;
}
.pattern-item {
display: flex;
flex-direction: column;
border-bottom: 1px solid rgba(255, 255, 255, 0.1);
}
.pattern-item:last-child {
border-bottom: none;
}
</style>
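The favorites store imported above exposes subscribe plus toggleFavorite. A minimal sketch persisted to localStorage (hypothetical; the real store lives in $lib/store/favorites-store):

// Hypothetical sketch of $lib/store/favorites-store.
// Browser-only: under SSR, guard with `browser` from $app/environment.
import { writable } from 'svelte/store';

function createFavoritesStore() {
  const initial: string[] = JSON.parse(localStorage.getItem('favorites') ?? '[]');
  const { subscribe, update } = writable<string[]>(initial);
  return {
    subscribe,
    toggleFavorite(name: string): void {
      update(list => {
        const next = list.includes(name)
          ? list.filter(n => n !== name)
          : [...list, name];
        localStorage.setItem('favorites', JSON.stringify(next));
        return next;
      });
    }
  };
}

export const favorites = createFavoritesStore();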

@@ -0,0 +1,194 @@
<script lang="ts">
import type { Pattern } from '$lib/interfaces/pattern-interface';
import { createEventDispatcher } from 'svelte';
const dispatch = createEventDispatcher<{
tagsChanged: string[];
}>();
export let patterns: Pattern[];
let selectedTags: string[] = [];
let isExpanded = false;
// Debug logging: trace incoming patterns, extracted tags, and panel state.
// Guard against patterns without a tags field so undefined doesn't leak into the tag set.
$: console.log('Available tags:', Array.from(new Set(patterns.flatMap(p => p.tags ?? []))));
$: console.log('Patterns received:', patterns);
$: console.log('Tags extracted:', patterns.map(p => p.tags));
$: console.log('Panel expanded:', isExpanded);
function toggleTag(tag: string) {
selectedTags = selectedTags.includes(tag)
? selectedTags.filter(t => t !== tag)
: [...selectedTags, tag];
dispatch('tagsChanged', selectedTags);
}
function togglePanel() {
isExpanded = !isExpanded;
}
export function reset() {
selectedTags = [];
isExpanded = false;
dispatch('tagsChanged', selectedTags);
}
</script>
<div class="tag-panel {isExpanded ? 'expanded' : ''}" style="z-index: 50">
<div class="panel-header">
<button class="close-btn" on:click={togglePanel}>
{isExpanded ? 'Close Filter Tags ◀' : 'Open Filter Tags ▶'}
</button>
</div>
<div class="panel-content">
<div class="reset-container">
<button
class="reset-btn"
on:click={() => {
selectedTags = [];
dispatch('tagsChanged', selectedTags);
}}
>
Reset All Tags
</button>
</div>
{#each Array.from(new Set(patterns.flatMap(p => p.tags ?? []))).sort() as tag}
<button
class="tag-brick {selectedTags.includes(tag) ? 'selected' : ''}"
on:click={() => toggleTag(tag)}
>
{tag}
</button>
{/each}
</div>
</div>
<style>
.tag-panel {
position: fixed; /* fixed so the panel floats beside the modal, independent of page flow */
left: calc(50% + 300px); /* Position starts after modal's right edge */
top: 50%;
transform: translateY(-50%);
width: 300px;
transition: left 0.3s ease;
}
.tag-panel.expanded {
left: calc(50% + 360px); /* Final position just to the right of modal */
}
.panel-content {
display: none;
padding: 12px;
flex-wrap: wrap;
gap: 6px;
max-height: 80vh;
overflow-y: auto;
grid-template-columns: repeat(3, 1fr);
}
.tag-brick {
padding: 4px 8px;
font-size: 0.8rem;
border-radius: 12px;
background: rgba(255,255,255,0.1);
cursor: pointer;
white-space: nowrap;
text-overflow: ellipsis;
overflow: hidden;
}
.reset-container {
width: 100%;
padding-bottom: 8px;
margin-bottom: 8px;
border-bottom: 1px solid rgba(255,255,255,0.1);
}
.reset-btn {
width: 100%;
padding: 6px;
font-size: 0.8rem;
color: var(--primary-300);
background: rgba(255,255,255,0.05);
border-radius: 4px;
transition: all 0.2s;
}
.reset-btn:hover {
background: rgba(255,255,255,0.1);
}
.expanded .panel-content {
display: flex;
}
/* .toggle-btn {
position: absolute;
left: -30px;
top: 50%;
transform: translateY(-50%);
padding: 8px;
background: var(--primary-800);
border-radius: 4px 0 0 4px;
cursor: pointer;
display: flex;
align-items: center;
gap: 4px;
font-size: 0.9rem;
box-shadow: -2px 0 5px rgba(0,0,0,0.2);
} */
.panel-header {
padding: 8px;
border-bottom: 1px solid rgba(255,255,255,0.1);
}
.close-btn {
width: auto;
padding: 6px;
position: absolute;
font-size: 0.8rem;
color: var(--primary-300);
background: rgba(255,255,255,0.05);
border-radius: 4px;
transition: all 0.2s;
text-align: left;
}
/* Position for 'Open Filter Tags' */
.tag-panel:not(.expanded) .close-btn {
top: -290px; /* Moves up to search bar level */
margin-left: 10px;
}
/* Position for 'Close Filter Tags' */
.expanded .close-btn {
position: relative;
top: 0;
margin-left: -50px;
}
.close-btn:hover {
background: rgba(255,255,255,0.1);
}
.tag-brick.selected {
background: var(--primary-300);
}
</style>
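The unique-tag expression is written out twice above, once in the debug logging and once in the template's #each; deriving it once would keep the two in sync. A sketch of the consolidated reactive statement:

// Sketch: derive the unique, sorted tag list once and reuse it.
$: allTags = Array.from(new Set(patterns.flatMap(p => p.tags ?? []))).sort();
// The template would then read: {#each allTags as tag} ... {/each}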

@@ -0,0 +1,52 @@
<script lang="ts">
import { formatDistance } from 'date-fns';
import type { Post } from './post-interface';
import PostMeta from './PostMeta.svelte';
import Card from '$lib/components/ui/cards/card.svelte';
import { cn } from '$lib/utils/utils';
export let post: Post;
export let className: string = '';
function parseDate(dateStr: string): Date {
// Handle both ISO strings and YYYY-MM-DD formats
return new Date(dateStr);
}
</script>
<article class="card card-hover group relative rounded-lg border p-6 hover:bg-primary-500/50 {className}">
<a
href="/posts/{post.slug}"
class="absolute inset-0"
data-sveltekit-preload-data="off"
>
<span class="sr-only">View {post.metadata?.title}</span>
</a>
<div class="flex flex-col justify-between space-y-4">
<div class="space-y-2">
<!-- <img src={post.metadata?.images?.[0]} alt="Posts Cards" class="rounded-lg" /> -->
<h2 class="text-xl font-semibold tracking-tight">{post.metadata?.title}</h2>
<p class="text-muted-foreground">{post.metadata?.description}</p>
</div>
<div class="flex items-center space-x-4 text-sm text-muted-foreground">
<time datetime={post.metadata?.date}>
{#if post.metadata?.date}
{formatDistance(parseDate(post.metadata.date), new Date(), { addSuffix: false })}
{/if}
</time>
{#if post.metadata?.tags?.length > 0}
<span class="text-xs"></span>
<div class="flex flex-wrap gap-2">
{#each post.metadata?.tags as tag}
<a
href="/tags/{tag}"
class="inline-flex items-center rounded-md border px-2 py-0.5 text-xs font-semibold transition-colors hover:bg-secondary"
>
{tag}
</a>
{/each}
</div>
{/if}
</div>
</div>
</article>
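formatDistance renders the post's age as human-readable text; with addSuffix set to false the trailing 'ago' is omitted. For reference, under date-fns semantics:

import { formatDistance } from 'date-fns';

// A post dated 2025-01-15 viewed on 2025-02-20 (36 days apart):
formatDistance(new Date('2025-01-15'), new Date('2025-02-20'), { addSuffix: false });
// => 'about 1 month'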

@@ -0,0 +1,37 @@
<script lang="ts">
import PostMeta from './PostMeta.svelte';
import type { Post } from './post-interface'
import Spinner from '$lib/components/ui/spinner/spinner.svelte';
import Toc from '$lib/components/ui/toc/Toc.svelte';
export let post: Post;
</script>
<article class="py-6">
{#if !post?.content || !post?.metadata}
<div class="flex min-h-[400px] items-center justify-center">
<div class="flex items-center gap-2">
<Spinner class="h-6 w-6" />
<span class="text-sm text-muted-foreground">Loading post...</span>
</div>
</div>
{:else}
<div class="space-y-4 pl-8 ml-8">
<h1 class="inline-block text-4xl font-bold inherit-colors lg:text-5xl">{post.metadata.title}</h1>
<PostMeta data={post.metadata} />
</div>
<div class="items-center py-8 mx-auto gap-8 max-w-7xl relative prose prose-slate dark:prose-invert">
{#if typeof post.content === 'function'}
<Toc />
<svelte:component this={post.content} />
{:else if typeof post.content === 'string'}
{post.content}
{:else}
<div class="flex gap-2">
<Spinner class="h-8 w-8" />
<span class="text-sm text-muted-foreground">Loading content...</span>
</div>
{/if}
</div>
{/if}
</article>
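The renderer branches on post.content being either a compiled Svelte component (mdsvex output) or a raw string. A matching interface sketch, consistent with that usage (hypothetical; the real definition lives in ./post-interface):

// Hypothetical sketch of ./post-interface consistent with the usage above.
import type { ComponentType } from 'svelte';

export interface PostMetadata {
  title: string;
  description?: string;
  date?: string; // ISO string or YYYY-MM-DD
  tags?: string[];
  author?: string;
}

export interface Post {
  slug: string;
  metadata?: PostMetadata;
  // mdsvex-compiled component, raw markdown/HTML string, or not yet loaded
  content?: ComponentType | string;
}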

@@ -0,0 +1,57 @@
<script>
export let aliases
export let date
export let tags
export let title
export let description
export let author
export let updated
//export let content
</script>
<article class="prose prose-slate dark:prose-invert max-w-5xl flex-1">
{#if aliases}
<h1 class="inline-block text-4xl font-bold inherit-colors lg:text-5xl">{aliases}</h1>
{/if}
<slot />
</article>
<style lang="postcss">
:global(h1) {
@apply h1;
}
:global(h2) {
@apply h2;
}
:global(p) {
@layer p;
}
:global(ul) {
@layer ul;
}
:global(li) {
@layer li;
}
:global(a) {
@layer a;
}
:global(img) {
@layer img;
}
:global(blockquote) {
@layer blockquote;
}
:global(code) {
@layer code;
}
</style>
