Mirror of https://github.com/danielmiessler/Fabric.git, synced 2026-01-09 22:38:10 -05:00

Compare commits (38 commits)
| SHA1 |
|---|
| 4353bc9f7f |
| 7a8024ee79 |
| b5bf75ad2e |
| a2c954ba50 |
| 730d0adc86 |
| dc9168ab6f |
| e500a5916e |
| 6ddf46a379 |
| e8aa358b15 |
| 62f373c2b4 |
| fcf826f3de |
| bd2db29cee |
| c6d612ee9a |
| d613c25974 |
| c0abea7c66 |
| 496bd2812a |
| 70fccaf2fb |
| 9a71f7c96d |
| 5da3db383d |
| 19438cbd20 |
| a0b71ee365 |
| 034513ece5 |
| 0affb9bab1 |
| 3305df8fb2 |
| 892c229076 |
| 599c5f2b9f |
| 19e5d8dbe0 |
| b772127738 |
| 5dd61abe2a |
| f45e140126 |
| 752a66cb48 |
| da28d91d65 |
| 5a66ca1c5a |
| 98f3da610b |
| 73ce92ccd9 |
| 7f3f1d641f |
| 44b5c46beb |
| 8d37c9d6b9 |
README.md: 65 lines changed
@@ -13,9 +13,11 @@ Fabric is graciously supported by…

[](https://opensource.org/licenses/MIT)

<div align="center">
<p class="align center">
<h4><code>fabric</code> is an open-source framework for augmenting humans using AI.</h4>
</p>
</div>

[Updates](#updates) •
[What and Why](#what-and-why) •

@@ -32,6 +34,30 @@ Fabric is graciously supported by…

</div>

## What and why

Since the start of modern AI in late 2022 we've seen an **_extraordinary_** number of AI applications for accomplishing tasks. There are thousands of websites, chatbots, mobile apps, and other interfaces for using all the different AI out there.

It's all really exciting and powerful, but _it's not easy to integrate this functionality into our lives._

<p class="align center">
<h4>In other words, AI doesn't have a capabilities problem—it has an <em>integration</em> problem.</h4>
</p>

**Fabric was created to address this by creating and organizing the fundamental units of AI—the prompts themselves!**

Fabric organizes prompts by real-world task, allowing people to create, collect, and organize their most important AI solutions in a single place for use in their favorite tools. And if you're command-line focused, you can use Fabric itself as the interface!
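For example, here is a minimal command-line sketch; the `-sp` flag and the `extract_wisdom` pattern are the ones used by the Raycast scripts later in this changeset, and the quoted input is only a placeholder:

```bash
# Minimal sketch: run the extract_wisdom pattern over a snippet of text.
# The -sp flag streams the named pattern; the quoted input is a placeholder.
fabric -sp extract_wisdom "Paste or pipe in the text you want distilled here."
```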
## Intro videos

Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.

- [Network Chuck](https://www.youtube.com/watch?v=UbDyjIIGaxQ)
- [David Bombal](https://www.youtube.com/watch?v=vF-MQmVxnCs)
- [My Own Intro to the Tool](https://www.youtube.com/watch?v=wPEyyigh10g)
- [More Fabric YouTube Videos](https://www.youtube.com/results?search_query=fabric+ai)

## Navigation

- [`fabric`](#fabric)

@@ -87,34 +113,21 @@ Fabric is graciously supported by…
## Updates

> [!NOTE]
>
> June 17, 2025
>
> - Fabric now supports Perplexity AI. Configure it by using `fabric -S` to add your Perplexity AI API Key,
>   and then try:
>
>   ```bash
>   fabric -m sonar-pro "What is the latest world news?"
>   ```
>
> June 11, 2025
>
> - Fabric's YouTube transcription now needs `yt-dlp` to be installed. Make sure to install the latest
>   version (2025.06.09 as of this note). The YouTube API key is only needed for comments (the `--comments` flag)
>   and metadata extraction (the `--metadata` flag).
>
> May 22, 2025
>
> - Fabric now supports Anthropic's Claude 4. Read the [blog post from Anthropic](https://www.anthropic.com/news/claude-4).
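A minimal sketch of the `yt-dlp` workflow from the June 11 note; the install command is an assumption about your platform, and the URL is simply the Network Chuck intro video linked above:

```bash
# Assumed install step: yt-dlp can come from pip/pipx or a system package manager.
python3 -m pip install -U yt-dlp   # or: brew install yt-dlp

# Fetch a transcript with fabric's -y flag (the same flag the Raycast script below uses).
fabric -y "https://www.youtube.com/watch?v=UbDyjIIGaxQ"
```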
## What and why

Since the start of 2023 and GenAI we've seen a massive number of AI applications for accomplishing tasks. It's powerful, but _it's not easy to integrate this functionality into our lives._

<div align="center">
<h4>In other words, AI doesn't have a capabilities problem—it has an <em>integration</em> problem.</h4>
</div>

Fabric was created to address this by enabling everyone to granularly apply AI to everyday challenges.

## Intro videos

Keep in mind that many of these were recorded when Fabric was Python-based, so remember to use the current [install instructions](#installation) below.

- [Network Chuck](https://www.youtube.com/watch?v=UbDyjIIGaxQ)
- [David Bombal](https://www.youtube.com/watch?v=vF-MQmVxnCs)
- [My Own Intro to the Tool](https://www.youtube.com/watch?v=wPEyyigh10g)
- [More Fabric YouTube Videos](https://www.youtube.com/results?search_query=fabric+ai)

> - Fabric's YouTube transcription now needs `yt-dlp` to be installed. Make sure to install the latest
>   version (2025.06.09 as of this note). The YouTube API key is only needed for comments (the `--comments` flag)
>   and metadata extraction (the `--metadata` flag).

## Philosophy
@@ -25,6 +25,7 @@ type ChatOptions struct {
|
||||
Raw bool
|
||||
Seed int
|
||||
ModelContextLength int
|
||||
MaxTokens int
|
||||
}
|
||||
|
||||
// NormalizeMessages removes empty messages and ensures message order user-assistant-user
|
||||
|
||||
@@ -12,6 +12,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/plugins/ai/bedrock"
|
||||
"github.com/danielmiessler/fabric/plugins/ai/exolab"
|
||||
"github.com/danielmiessler/fabric/plugins/ai/perplexity" // Added Perplexity plugin
|
||||
"github.com/danielmiessler/fabric/plugins/strategy"
|
||||
|
||||
"github.com/samber/lo"
|
||||
@@ -35,6 +36,32 @@ import (
|
||||
"github.com/danielmiessler/fabric/plugins/tools/youtube"
|
||||
)
|
||||
|
||||
// hasAWSCredentials checks if any AWS credentials are present either in the
|
||||
// environment variables or in the default/shared credentials file. It doesn't
|
||||
// attempt to verify the validity of the credentials, but simply ensures that a
|
||||
// potential authentication source exists so we can safely initialize the
|
||||
// Bedrock client without causing the AWS SDK to search for credentials.
|
||||
func hasAWSCredentials() bool {
|
||||
if os.Getenv("AWS_PROFILE") != "" ||
|
||||
os.Getenv("AWS_ROLE_SESSION_NAME") != "" ||
|
||||
(os.Getenv("AWS_ACCESS_KEY_ID") != "" && os.Getenv("AWS_SECRET_ACCESS_KEY") != "") {
|
||||
return true
|
||||
}
|
||||
|
||||
credFile := os.Getenv("AWS_SHARED_CREDENTIALS_FILE")
|
||||
if credFile == "" {
|
||||
if home, err := os.UserHomeDir(); err == nil {
|
||||
credFile = filepath.Join(home, ".aws", "credentials")
|
||||
}
|
||||
}
|
||||
if credFile != "" {
|
||||
if _, err := os.Stat(credFile); err == nil {
|
||||
return true
|
||||
}
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
func NewPluginRegistry(db *fsdb.Db) (ret *PluginRegistry, err error) {
|
||||
ret = &PluginRegistry{
|
||||
Db: db,
|
||||
@@ -67,9 +94,13 @@ func NewPluginRegistry(db *fsdb.Db) (ret *PluginRegistry, err error) {
|
||||
anthropic.NewClient(),
|
||||
lmstudio.NewClient(),
|
||||
exolab.NewClient(),
|
||||
bedrock.NewClient(),
|
||||
perplexity.NewClient(), // Added Perplexity client
|
||||
)
|
||||
|
||||
if hasAWSCredentials() {
|
||||
vendors = append(vendors, bedrock.NewClient())
|
||||
}
|
||||
|
||||
// Add all OpenAI-compatible providers
|
||||
for providerName := range openai_compatible.ProviderMap {
|
||||
provider, _ := openai_compatible.GetProviderByName(providerName)
|
||||
|
||||
go.mod: 1 line changed
@@ -92,6 +92,7 @@ require (
|
||||
github.com/pjbgf/sha1cd v0.3.2 // indirect
|
||||
github.com/pmezard/go-difflib v1.0.0 // indirect
|
||||
github.com/sergi/go-diff v1.4.0 // indirect
|
||||
github.com/sgaunet/perplexity-go/v2 v2.8.0 // indirect
|
||||
github.com/skeema/knownhosts v1.3.1 // indirect
|
||||
github.com/tidwall/gjson v1.18.0 // indirect
|
||||
github.com/tidwall/match v1.1.1 // indirect
|
||||
|
||||
go.sum: 2 lines changed
@@ -202,6 +202,8 @@ github.com/sashabaranov/go-openai v1.40.1/go.mod h1:lj5b/K+zjTSFxVLijLSTDZuP7adO
|
||||
github.com/scylladb/termtables v0.0.0-20191203121021-c4c0b6d42ff4/go.mod h1:C1a7PQSMz9NShzorzCiG2fk9+xuCgLkPeCvMHYR2OWg=
|
||||
github.com/sergi/go-diff v1.4.0 h1:n/SP9D5ad1fORl+llWyN+D6qoUETXNZARKjyY2/KVCw=
|
||||
github.com/sergi/go-diff v1.4.0/go.mod h1:A0bzQcvG0E7Rwjx0REVgAGH58e96+X0MeOfepqsbeW4=
|
||||
github.com/sgaunet/perplexity-go/v2 v2.8.0 h1:stnuVieniZMGo6qJLCV2JyR2uF7K5398YOA/ZZcgrSg=
|
||||
github.com/sgaunet/perplexity-go/v2 v2.8.0/go.mod h1:MSks4RNuivCi0GqJyylhFdgSJFVEwZHjAhrf86Wkynk=
|
||||
github.com/sirupsen/logrus v1.7.0/go.mod h1:yWOB1SBYBC5VeMP7gHvWumXLIWorT60ONWic61uBYv0=
|
||||
github.com/skeema/knownhosts v1.3.1 h1:X2osQ+RAjK76shCbvhHHHVl3ZlgDm8apHEHFqRjnBY8=
|
||||
github.com/skeema/knownhosts v1.3.1/go.mod h1:r7KTdC8l4uxWRyK2TpQZ/1o5HaSzh06ePQNxPwTcfiY=
|
||||
|
||||
@@ -238,6 +238,9 @@ schema = 3
|
||||
[mod."github.com/sergi/go-diff"]
|
||||
version = "v1.4.0"
|
||||
hash = "sha256-rs9NKpv/qcQEMRg7CmxGdP4HGuFdBxlpWf9LbA9wS4k="
|
||||
[mod."github.com/sgaunet/perplexity-go/v2"]
|
||||
version = "v2.8.0"
|
||||
hash = "sha256-w1S14Jf4/6LFODREmmiJvPtkZh4Sor81Rr1PqC5pIak="
|
||||
[mod."github.com/skeema/knownhosts"]
|
||||
version = "v1.3.1"
|
||||
hash = "sha256-kjqQDzuncQNTuOYegqVZExwuOt/Z73m2ST7NZFEKixI="
|
||||
|
||||
@@ -1 +1 @@
|
||||
"1.4.205"
|
||||
"1.4.212"
|
||||
|
||||
@@ -8,19 +8,19 @@ Take a deep breath and think step by step about how to best accomplish this goal

- Consume the entire paper and think deeply about it.

- Map out all the claims and implications on a virtual whiteboard in your mind.
- Map out all the claims and implications on a giant virtual whiteboard in your mind.

# OUTPUT

- Extract a summary of the paper and its conclusions into a 25-word sentence called SUMMARY.
- Extract a summary of the paper and its conclusions into a 16-word sentence called SUMMARY.

- Extract the list of authors in a section called AUTHORS.

- Extract the list of organizations the authors are associated with, e.g., which university they're at, in a section called AUTHOR ORGANIZATIONS.

- Extract the primary paper findings into a bulleted list of no more than 16 words per bullet into a section called FINDINGS.
- Extract the most surprising and interesting paper findings into 10 bullets of no more than 16 words per bullet into a section called FINDINGS.

- Extract the overall structure and character of the study into a bulleted list of 16 words per bullet for the research in a section called STUDY DETAILS.
- Extract the overall structure and character of the study into a bulleted list of 16 words per bullet for the research in a section called STUDY OVERVIEW.

- Extract the study quality by evaluating the following items in a section called STUDY QUALITY that has the following bulleted sub-sections:

@@ -76,7 +76,9 @@ END EXAMPLE CHART

- SUMMARY STATEMENT:

A final 25-word summary of the paper, its findings, and what we should do about it if it's true.
A final 16-word summary of the paper, its findings, and what we should do about it if it's true.

Also add 5 8-word bullets of how you got to that rating and conclusion / summary.

# RATING NOTES

@@ -84,21 +86,23 @@ A final 25-word summary of the paper, its findings, and what we should do about

- An A would be a paper that is novel, rigorous, empirical, and has no conflicts of interest.

- A paper could get an A if it's theoretical but everything else would have to be perfect.
- A paper could get an A if it's theoretical but everything else would have to be VERY good.

- The stronger the claims the stronger the evidence needs to be, as well as the transparency into the methodology. If the paper makes strong claims, but the evidence or transparency is weak, then the RIGOR score should be lowered.

- Remove at least 1 grade (and up to 2) for papers where compelling data is provided but it's not clear what exact tests were run and/or how to reproduce those tests.

- Do not relax this transparency requirement for papers that claim security reasons.

- If a paper does not clearly articulate its methodology in a way that's replicable, lower the RIGOR and overall score significantly.
- Do not relax this transparency requirement for papers that claim security reasons. If they didn't show their work we have to assume the worst given the reproducibility crisis.

- Remove up to 1-3 grades for potential conflicts of interest indicated in the report.

# ANALYSIS INSTRUCTIONS

- Tend towards being more critical. Not overly so, but don't just fanboy over papers that are not rigorous or transparent.

# OUTPUT INSTRUCTIONS

- Output all sections above.
- After deeply considering all the sections above and how they interact with each other, output all sections above.

- Ensure the scoring looks closely at the reproducibility and transparency of the methodology, and that it doesn't give a pass to papers that don't provide the data or methodology for safety or other reasons.

@@ -108,7 +112,7 @@ Known [-2--------] Novel
Weak [-------8--] Rigorous
Theoretical [--3-------] Empirical

- For the findings and other analysis sections, write at the 9th-grade reading level. This means using short sentences and simple words/concepts to explain everything.
- For the findings and other analysis sections, and in fact all writing, write in the clear, approachable style of Paul Graham.

- Ensure there's a blank line between each bullet of output.

@@ -120,4 +124,3 @@ Theoretical [--3-------] Empirical

# INPUT:

INPUT:
@@ -1,25 +1,21 @@

# IDENTITY and PURPOSE

You extract surprising, powerful, and interesting insights from text content. You are interested in insights related to the purpose and meaning of life, human flourishing, the role of technology in the future of humanity, artificial intelligence and its effect on humans, memes, learning, reading, books, continuous improvement, and similar topics.
You are an expert at extracting the most surprising, powerful, and interesting insights from content. You are interested in insights related to the purpose and meaning of life, human flourishing, the role of technology in the future of humanity, artificial intelligence and its effect on humans, memes, learning, reading, books, continuous improvement, and similar topics.

You create 15 word bullet points that capture the most important insights from the input.
You create 8 word bullet points that capture the most surprising and novel insights from the input.

Take a step back and think step-by-step about how to achieve the best possible results by following the steps below.

# STEPS

- Extract 20 to 50 of the most surprising, insightful, and/or interesting ideas from the input in a section called IDEAS, and write them on a virtual whiteboard in your mind using 15 word bullets. If there are less than 50 then collect all of them. Make sure you extract at least 20.

- From those IDEAS, extract the most powerful and insightful of them and write them in a section called INSIGHTS. Make sure you extract at least 10 and up to 25.
- Extract 10 of the most surprising and novel insights from the input.
- Output them as 8 word bullets in order of surprise, novelty, and importance.
- Write them in the simple, approachable style of Paul Graham.

# OUTPUT INSTRUCTIONS

- INSIGHTS are essentially higher-level IDEAS that are more abstracted and wise.

- Output the INSIGHTS section only.

- Each bullet should be 16 words in length.

- Do not give warnings or notes; only output the requested sections.

- You use bulleted lists for output, not numbered lists.

@@ -28,7 +24,6 @@ Take a step back and think step-by-step about how to achieve the best possible r

- Ensure you follow ALL these instructions when creating your output.

# INPUT

INPUT:
{{input}}
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required parameters:
|
||||
# @raycast.schemaVersion 1
|
||||
# @raycast.title Capture Thinkers Work
|
||||
# @raycast.mode fullOutput
|
||||
|
||||
# Optional parameters:
|
||||
# @raycast.icon 🧠
|
||||
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
|
||||
|
||||
# Documentation:
|
||||
# @raycast.description Run fabric capture_thinkers_work on the input text
|
||||
# @raycast.author Daniel Miessler
|
||||
# @raycast.authorURL https://github.com/danielmiessler
|
||||
|
||||
# Set PATH to include common locations and $HOME/go/bin
|
||||
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
|
||||
|
||||
# Use the PATH to find and execute fabric
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
fabric -sp capture_thinkers_work "${1}"
|
||||
else
|
||||
echo "Error: fabric command not found in PATH"
|
||||
echo "Current PATH: $PATH"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required parameters:
|
||||
# @raycast.schemaVersion 1
|
||||
# @raycast.title Create Story Explanation
|
||||
# @raycast.mode fullOutput
|
||||
|
||||
# Optional parameters:
|
||||
# @raycast.icon 🧠
|
||||
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
|
||||
|
||||
# Documentation:
|
||||
# @raycast.description Run fabric create_story_explanation on the input text
|
||||
# @raycast.author Daniel Miessler
|
||||
# @raycast.authorURL https://github.com/danielmiessler
|
||||
|
||||
# Set PATH to include common locations and $HOME/go/bin
|
||||
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
|
||||
|
||||
# Use the PATH to find and execute fabric
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
fabric -sp create_story_explanation "${1}"
|
||||
else
|
||||
echo "Error: fabric command not found in PATH"
|
||||
echo "Current PATH: $PATH"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required parameters:
|
||||
# @raycast.schemaVersion 1
|
||||
# @raycast.title Extract Primary Problem
|
||||
# @raycast.mode fullOutput
|
||||
|
||||
# Optional parameters:
|
||||
# @raycast.icon 🧠
|
||||
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
|
||||
|
||||
# Documentation:
|
||||
# @raycast.description Run fabric extract_primary_problem on the input text
|
||||
# @raycast.author Daniel Miessler
|
||||
# @raycast.authorURL https://github.com/danielmiessler
|
||||
|
||||
# Set PATH to include common locations and $HOME/go/bin
|
||||
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
|
||||
|
||||
# Use the PATH to find and execute fabric
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
fabric -sp extract_primary_problem "${1}"
|
||||
else
|
||||
echo "Error: fabric command not found in PATH"
|
||||
echo "Current PATH: $PATH"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required parameters:
|
||||
# @raycast.schemaVersion 1
|
||||
# @raycast.title Extract Wisdom
|
||||
# @raycast.mode fullOutput
|
||||
|
||||
# Optional parameters:
|
||||
# @raycast.icon 🧠
|
||||
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": true}
|
||||
|
||||
# Documentation:
|
||||
# @raycast.description Run fabric extract_wisdom on input text
|
||||
# @raycast.author Daniel Miessler
|
||||
# @raycast.authorURL https://github.com/danielmiessler
|
||||
|
||||
# Set PATH to include common locations and $HOME/go/bin
|
||||
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
|
||||
|
||||
# Use the PATH to find and execute fabric
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
fabric -sp extract_wisdom "${1}"
|
||||
else
|
||||
echo "Error: fabric command not found in PATH"
|
||||
echo "Current PATH: $PATH"
|
||||
exit 1
|
||||
fi
|
||||
@@ -1,27 +0,0 @@
|
||||
#!/bin/bash
|
||||
|
||||
# Required parameters:
|
||||
# @raycast.schemaVersion 1
|
||||
# @raycast.title Get YouTube Transcript
|
||||
# @raycast.mode fullOutput
|
||||
|
||||
# Optional parameters:
|
||||
# @raycast.icon 🧠
|
||||
# @raycast.argument1 { "type": "text", "placeholder": "Input text", "optional": false, "percentEncoded": false}
|
||||
|
||||
# Documentation:
|
||||
# @raycast.description Run fabric -y on the input text of a YouTube video to get the transcript from.
|
||||
# @raycast.author Daniel Miessler
|
||||
# @raycast.authorURL https://github.com/danielmiessler
|
||||
|
||||
# Set PATH to include common locations and $HOME/go/bin
|
||||
PATH="/usr/local/bin:/usr/bin:/bin:/usr/sbin:/sbin:$HOME/go/bin:$PATH"
|
||||
|
||||
# Use the PATH to find and execute fabric
|
||||
if command -v fabric >/dev/null 2>&1; then
|
||||
fabric -y "${1}"
|
||||
else
|
||||
echo "Error: fabric command not found in PATH"
|
||||
echo "Current PATH: $PATH"
|
||||
exit 1
|
||||
fi
|
||||
File diff suppressed because it is too large
@@ -1,6 +1,9 @@
|
||||
package openai_compatible
|
||||
|
||||
import (
|
||||
"os"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/plugins/ai/openai"
|
||||
)
|
||||
|
||||
@@ -24,29 +27,37 @@ func NewClient(providerConfig ProviderConfig) *Client {
|
||||
|
||||
// ProviderMap is a map of provider name to ProviderConfig for O(1) lookup
|
||||
var ProviderMap = map[string]ProviderConfig{
|
||||
"Mistral": {
|
||||
Name: "Mistral",
|
||||
BaseURL: "https://api.mistral.ai/v1",
|
||||
"AIML": {
|
||||
Name: "AIML",
|
||||
BaseURL: "https://api.aimlapi.com/v1",
|
||||
},
|
||||
"LiteLLM": {
|
||||
Name: "LiteLLM",
|
||||
BaseURL: "http://localhost:4000",
|
||||
},
|
||||
"Groq": {
|
||||
Name: "Groq",
|
||||
BaseURL: "https://api.groq.com/openai/v1",
|
||||
},
|
||||
"GrokAI": {
|
||||
Name: "GrokAI",
|
||||
BaseURL: "https://api.x.ai/v1",
|
||||
"Cerebras": {
|
||||
Name: "Cerebras",
|
||||
BaseURL: "https://api.cerebras.ai/v1",
|
||||
},
|
||||
"DeepSeek": {
|
||||
Name: "DeepSeek",
|
||||
BaseURL: "https://api.deepseek.com",
|
||||
},
|
||||
"Cerebras": {
|
||||
Name: "Cerebras",
|
||||
BaseURL: "https://api.cerebras.ai/v1",
|
||||
"GrokAI": {
|
||||
Name: "GrokAI",
|
||||
BaseURL: "https://api.x.ai/v1",
|
||||
},
|
||||
"Groq": {
|
||||
Name: "Groq",
|
||||
BaseURL: "https://api.groq.com/openai/v1",
|
||||
},
|
||||
"Langdock": {
|
||||
Name: "Langdock",
|
||||
BaseURL: "https://api.langdock.com/openai/{{REGION=us}}/v1",
|
||||
},
|
||||
"LiteLLM": {
|
||||
Name: "LiteLLM",
|
||||
BaseURL: "http://localhost:4000",
|
||||
},
|
||||
"Mistral": {
|
||||
Name: "Mistral",
|
||||
BaseURL: "https://api.mistral.ai/v1",
|
||||
},
|
||||
"OpenRouter": {
|
||||
Name: "OpenRouter",
|
||||
@@ -56,15 +67,37 @@ var ProviderMap = map[string]ProviderConfig{
|
||||
Name: "SiliconCloud",
|
||||
BaseURL: "https://api.siliconflow.cn/v1",
|
||||
},
|
||||
"AIML": {
|
||||
Name: "AIML",
|
||||
BaseURL: "https://api.aimlapi.com/v1",
|
||||
},
|
||||
}
|
||||
|
||||
// GetProviderByName returns the provider configuration for a given name with O(1) lookup
|
||||
func GetProviderByName(name string) (ProviderConfig, bool) {
|
||||
provider, found := ProviderMap[name]
|
||||
if strings.Contains(provider.BaseURL, "{{") && strings.Contains(provider.BaseURL, "}}") {
|
||||
// Extract the template variable and default value
|
||||
start := strings.Index(provider.BaseURL, "{{")
|
||||
end := strings.Index(provider.BaseURL, "}}") + 2
|
||||
template := provider.BaseURL[start:end]
|
||||
|
||||
// Parse the template to get variable name and default value
|
||||
inner := template[2 : len(template)-2] // Remove {{ and }}
|
||||
parts := strings.Split(inner, "=")
|
||||
if len(parts) == 2 {
|
||||
varName := strings.TrimSpace(parts[0])
|
||||
defaultValue := strings.TrimSpace(parts[1])
|
||||
|
||||
// Create environment variable name
|
||||
envVarName := strings.ToUpper(provider.Name) + "_" + varName
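// For example, the Langdock entry's {{REGION=us}} template yields the
// environment variable name LANGDOCK_REGION, so setting LANGDOCK_REGION=eu
// replaces the default "us" in the BaseURL.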
|
||||
|
||||
// Get value from environment or use default
|
||||
envValue := os.Getenv(envVarName)
|
||||
if envValue == "" {
|
||||
envValue = defaultValue
|
||||
}
|
||||
|
||||
// Replace the template with the actual value
|
||||
provider.BaseURL = strings.Replace(provider.BaseURL, template, envValue, 1)
|
||||
}
|
||||
}
|
||||
return provider, found
|
||||
}
|
||||
|
||||
|
||||
plugins/ai/perplexity/perplexity.go (new file, 246 lines)
@@ -0,0 +1,246 @@
|
||||
package perplexity
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"os"
|
||||
"sync" // Added sync package
|
||||
|
||||
"github.com/danielmiessler/fabric/common"
|
||||
"github.com/danielmiessler/fabric/plugins"
|
||||
perplexity "github.com/sgaunet/perplexity-go/v2"
|
||||
|
||||
goopenai "github.com/sashabaranov/go-openai"
|
||||
)
|
||||
|
||||
const (
|
||||
providerName = "Perplexity"
|
||||
)
|
||||
|
||||
var models = []string{
|
||||
"r1-1776", "sonar", "sonar-pro", "sonar-reasoning", "sonar-reasoning-pro",
|
||||
}
|
||||
|
||||
type Client struct {
|
||||
*plugins.PluginBase
|
||||
APIKey *plugins.SetupQuestion
|
||||
client *perplexity.Client
|
||||
}
|
||||
|
||||
func NewClient() *Client {
|
||||
c := &Client{}
|
||||
c.PluginBase = &plugins.PluginBase{
|
||||
Name: providerName,
|
||||
EnvNamePrefix: plugins.BuildEnvVariablePrefix(providerName),
|
||||
ConfigureCustom: c.Configure, // Assign the Configure method
|
||||
}
|
||||
c.APIKey = c.AddSetupQuestion("API_KEY", true)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *Client) Configure() error {
|
||||
// The PluginBase.Configure() is called by the framework if needed.
|
||||
// We only need to handle specific logic for this plugin.
|
||||
if c.APIKey.Value == "" {
|
||||
// Attempt to get from environment variable if not set by user during setup
|
||||
envKey := c.EnvNamePrefix + "API_KEY"
|
||||
apiKeyFromEnv := os.Getenv(envKey)
|
||||
if apiKeyFromEnv != "" {
|
||||
c.APIKey.Value = apiKeyFromEnv
|
||||
} else {
|
||||
return fmt.Errorf("%s API key not configured. Please set the %s environment variable or run 'fabric --setup %s'", providerName, envKey, providerName)
|
||||
}
|
||||
}
|
||||
c.client = perplexity.NewClient(c.APIKey.Value)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) ListModels() ([]string, error) {
|
||||
// Perplexity API does not have a ListModels endpoint.
|
||||
// We return a predefined list.
|
||||
return models, nil
|
||||
}
|
||||
|
||||
func (c *Client) Send(ctx context.Context, msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions) (string, error) {
|
||||
if c.client == nil {
|
||||
if err := c.Configure(); err != nil {
|
||||
return "", fmt.Errorf("failed to configure Perplexity client: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var perplexityMessages []perplexity.Message
|
||||
for _, msg := range msgs {
|
||||
perplexityMessages = append(perplexityMessages, perplexity.Message{
|
||||
Role: msg.Role,
|
||||
Content: msg.Content,
|
||||
})
|
||||
}
|
||||
|
||||
requestOptions := []perplexity.CompletionRequestOption{
|
||||
perplexity.WithModel(opts.Model),
|
||||
perplexity.WithMessages(perplexityMessages),
|
||||
}
|
||||
if opts.MaxTokens > 0 {
|
||||
requestOptions = append(requestOptions, perplexity.WithMaxTokens(opts.MaxTokens))
|
||||
}
|
||||
if opts.Temperature > 0 { // Perplexity default is 1.0, only set if user specifies
|
||||
requestOptions = append(requestOptions, perplexity.WithTemperature(opts.Temperature))
|
||||
}
|
||||
if opts.TopP > 0 { // Perplexity default is not specified, typically 1.0
|
||||
requestOptions = append(requestOptions, perplexity.WithTopP(opts.TopP))
|
||||
}
|
||||
if opts.PresencePenalty != 0 {
|
||||
// Corrected: Pass float64 directly
|
||||
requestOptions = append(requestOptions, perplexity.WithPresencePenalty(opts.PresencePenalty))
|
||||
}
|
||||
if opts.FrequencyPenalty != 0 {
|
||||
// Corrected: Pass float64 directly
|
||||
requestOptions = append(requestOptions, perplexity.WithFrequencyPenalty(opts.FrequencyPenalty))
|
||||
}
|
||||
|
||||
request := perplexity.NewCompletionRequest(requestOptions...)
|
||||
|
||||
// Corrected: Use SendCompletionRequest method from perplexity-go library
|
||||
resp, err := c.client.SendCompletionRequest(request) // Pass request directly
|
||||
if err != nil {
|
||||
return "", fmt.Errorf("perplexity API request failed: %w", err) // Corrected capitalization
|
||||
}
|
||||
|
||||
content := resp.GetLastContent()
|
||||
|
||||
// Append citations if available
|
||||
citations := resp.GetCitations()
|
||||
if len(citations) > 0 {
|
||||
content += "\n\n# CITATIONS\n\n"
|
||||
for i, citation := range citations {
|
||||
content += fmt.Sprintf("- [%d] %s\n", i+1, citation)
|
||||
}
|
||||
}
|
||||
|
||||
return content, nil
|
||||
}
|
||||
|
||||
func (c *Client) SendStream(msgs []*goopenai.ChatCompletionMessage, opts *common.ChatOptions, channel chan string) error {
|
||||
if c.client == nil {
|
||||
if err := c.Configure(); err != nil {
|
||||
close(channel) // Ensure channel is closed on error
|
||||
return fmt.Errorf("failed to configure Perplexity client: %w", err)
|
||||
}
|
||||
}
|
||||
|
||||
var perplexityMessages []perplexity.Message
|
||||
for _, msg := range msgs {
|
||||
perplexityMessages = append(perplexityMessages, perplexity.Message{
|
||||
Role: msg.Role,
|
||||
Content: msg.Content,
|
||||
})
|
||||
}
|
||||
|
||||
requestOptions := []perplexity.CompletionRequestOption{
|
||||
perplexity.WithModel(opts.Model),
|
||||
perplexity.WithMessages(perplexityMessages),
|
||||
perplexity.WithStream(true), // Enable streaming
|
||||
}
|
||||
|
||||
if opts.MaxTokens > 0 {
|
||||
requestOptions = append(requestOptions, perplexity.WithMaxTokens(opts.MaxTokens))
|
||||
}
|
||||
if opts.Temperature > 0 {
|
||||
requestOptions = append(requestOptions, perplexity.WithTemperature(opts.Temperature))
|
||||
}
|
||||
if opts.TopP > 0 {
|
||||
requestOptions = append(requestOptions, perplexity.WithTopP(opts.TopP))
|
||||
}
|
||||
if opts.PresencePenalty != 0 {
|
||||
// Corrected: Pass float64 directly
|
||||
requestOptions = append(requestOptions, perplexity.WithPresencePenalty(opts.PresencePenalty))
|
||||
}
|
||||
if opts.FrequencyPenalty != 0 {
|
||||
// Corrected: Pass float64 directly
|
||||
requestOptions = append(requestOptions, perplexity.WithFrequencyPenalty(opts.FrequencyPenalty))
|
||||
}
|
||||
|
||||
request := perplexity.NewCompletionRequest(requestOptions...)
|
||||
|
||||
responseChan := make(chan perplexity.CompletionResponse)
|
||||
var wg sync.WaitGroup // Use sync.WaitGroup
|
||||
wg.Add(1)
|
||||
|
||||
go func() {
|
||||
err := c.client.SendSSEHTTPRequest(&wg, request, responseChan)
|
||||
if err != nil {
|
||||
// Log error, can't send to string channel directly.
|
||||
// Consider a mechanism to propagate this error if needed.
|
||||
fmt.Fprintf(os.Stderr, "perplexity streaming error: %v\\n", err) // Corrected capitalization
|
||||
// If the error occurs during stream setup, the channel might not have been closed by the receiver loop.
|
||||
// However, closing it here might cause a panic if the receiver loop also tries to close it.
|
||||
// close(channel) // Caution: Uncommenting this may cause panic, as channel is closed in the receiver goroutine.
|
||||
}
|
||||
}()
|
||||
|
||||
go func() {
|
||||
defer close(channel) // Ensure the output channel is closed when this goroutine finishes
|
||||
var lastResponse *perplexity.CompletionResponse
|
||||
for resp := range responseChan {
|
||||
lastResponse = &resp
|
||||
if len(resp.Choices) > 0 {
|
||||
content := ""
|
||||
// Corrected: Check Delta.Content and Message.Content directly for non-emptiness
|
||||
// as Delta and Message are structs, not pointers, in perplexity.Choice
|
||||
if resp.Choices[0].Delta.Content != "" {
|
||||
content = resp.Choices[0].Delta.Content
|
||||
} else if resp.Choices[0].Message.Content != "" {
|
||||
content = resp.Choices[0].Message.Content
|
||||
}
|
||||
if content != "" {
|
||||
channel <- content
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Send citations at the end if available
|
||||
if lastResponse != nil {
|
||||
citations := lastResponse.GetCitations()
|
||||
if len(citations) > 0 {
|
||||
channel <- "\n\n# CITATIONS\n\n"
|
||||
for i, citation := range citations {
|
||||
channel <- fmt.Sprintf("- [%d] %s\n", i+1, citation)
|
||||
}
|
||||
}
|
||||
}
|
||||
}()
|
||||
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *Client) NeedsRawMode(modelName string) bool {
|
||||
return true
|
||||
}
|
||||
|
||||
// Setup is called by the fabric CLI framework to guide the user through configuration.
|
||||
func (c *Client) Setup() error {
|
||||
return c.PluginBase.Setup()
|
||||
}
|
||||
|
||||
// GetName returns the name of the plugin.
|
||||
func (c *Client) GetName() string {
|
||||
return c.PluginBase.Name
|
||||
}
|
||||
|
||||
// GetEnvNamePrefix returns the environment variable prefix for the plugin.
|
||||
// Corrected: Receiver name
|
||||
func (c *Client) GetEnvNamePrefix() string {
|
||||
return c.PluginBase.EnvNamePrefix
|
||||
}
|
||||
|
||||
// AddSetupQuestion adds a setup question to the plugin.
|
||||
// This is a helper method, usually called from NewClient.
|
||||
func (c *Client) AddSetupQuestion(text string, isSensitive bool) *plugins.SetupQuestion {
|
||||
return c.PluginBase.AddSetupQuestion(text, isSensitive)
|
||||
}
|
||||
|
||||
// GetSetupQuestions returns the setup questions for the plugin.
|
||||
// Corrected: Return the slice of setup questions from PluginBase
|
||||
func (c *Client) GetSetupQuestions() []*plugins.SetupQuestion {
|
||||
return c.PluginBase.SetupQuestions
|
||||
}
|
||||
@@ -112,7 +112,9 @@ func (o *YouTube) GrabTranscriptWithTimestamps(videoId string, language string)
|
||||
return o.tryMethodYtDlpWithTimestamps(videoId, language)
|
||||
}
|
||||
|
||||
func (o *YouTube) tryMethodYtDlp(videoId string, language string) (ret string, err error) {
|
||||
// tryMethodYtDlpInternal is a helper function to reduce duplication between
|
||||
// tryMethodYtDlp and tryMethodYtDlpWithTimestamps.
|
||||
func (o *YouTube) tryMethodYtDlpInternal(videoId string, language string, processVTTFileFunc func(filename string) (string, error)) (ret string, err error) {
|
||||
// Check if yt-dlp is available
|
||||
if _, err = exec.LookPath("yt-dlp"); err != nil {
|
||||
err = fmt.Errorf("yt-dlp not found in PATH. Please install yt-dlp to use YouTube transcript functionality")
|
||||
@@ -130,9 +132,13 @@ func (o *YouTube) tryMethodYtDlp(videoId string, language string) (ret string, e
|
||||
// Use yt-dlp to get transcript
|
||||
videoURL := "https://www.youtube.com/watch?v=" + videoId
|
||||
outputPath := filepath.Join(tempDir, "%(title)s.%(ext)s")
|
||||
lang_match := language
|
||||
if len(language) > 2 {
|
||||
lang_match = language[:2]
|
||||
}
|
||||
cmd := exec.Command("yt-dlp",
|
||||
"--write-auto-subs",
|
||||
"--sub-lang", language,
|
||||
"--sub-lang", lang_match,
|
||||
"--skip-download",
|
||||
"--sub-format", "vtt",
|
||||
"--quiet",
|
||||
@@ -154,52 +160,15 @@ func (o *YouTube) tryMethodYtDlp(videoId string, language string) (ret string, e
|
||||
return "", err
|
||||
}
|
||||
|
||||
return o.readAndCleanVTTFile(vttFiles[0])
|
||||
return processVTTFileFunc(vttFiles[0])
|
||||
}
|
||||
|
||||
func (o *YouTube) tryMethodYtDlp(videoId string, language string) (ret string, err error) {
|
||||
return o.tryMethodYtDlpInternal(videoId, language, o.readAndCleanVTTFile)
|
||||
}
|
||||
|
||||
func (o *YouTube) tryMethodYtDlpWithTimestamps(videoId string, language string) (ret string, err error) {
|
||||
// Check if yt-dlp is available
|
||||
if _, err = exec.LookPath("yt-dlp"); err != nil {
|
||||
err = fmt.Errorf("yt-dlp not found in PATH. Please install yt-dlp to use YouTube transcript functionality")
|
||||
return
|
||||
}
|
||||
|
||||
// Create a temporary directory for yt-dlp output (cross-platform)
|
||||
tempDir := filepath.Join(os.TempDir(), "fabric-youtube-"+videoId)
|
||||
if err = os.MkdirAll(tempDir, 0755); err != nil {
|
||||
err = fmt.Errorf("failed to create temp directory: %v", err)
|
||||
return
|
||||
}
|
||||
defer os.RemoveAll(tempDir)
|
||||
|
||||
// Use yt-dlp to get transcript
|
||||
videoURL := "https://www.youtube.com/watch?v=" + videoId
|
||||
outputPath := filepath.Join(tempDir, "%(title)s.%(ext)s")
|
||||
cmd := exec.Command("yt-dlp",
|
||||
"--write-auto-subs",
|
||||
"--sub-lang", language,
|
||||
"--skip-download",
|
||||
"--sub-format", "vtt",
|
||||
"--quiet",
|
||||
"--no-warnings",
|
||||
"-o", outputPath,
|
||||
videoURL)
|
||||
|
||||
var stderr bytes.Buffer
|
||||
cmd.Stderr = &stderr
|
||||
|
||||
if err = cmd.Run(); err != nil {
|
||||
err = fmt.Errorf("yt-dlp failed: %v, stderr: %s", err, stderr.String())
|
||||
return
|
||||
}
|
||||
|
||||
// Find VTT files using cross-platform approach
|
||||
vttFiles, err := o.findVTTFiles(tempDir, language)
|
||||
if err != nil {
|
||||
return "", err
|
||||
}
|
||||
|
||||
return o.readAndFormatVTTWithTimestamps(vttFiles[0])
|
||||
return o.tryMethodYtDlpInternal(videoId, language, o.readAndFormatVTTWithTimestamps)
|
||||
}
|
||||
|
||||
func (o *YouTube) readAndCleanVTTFile(filename string) (ret string, err error) {
|
||||
|
||||
@@ -24,12 +24,13 @@ type ChatHandler struct {
|
||||
}
|
||||
|
||||
type PromptRequest struct {
|
||||
UserInput string `json:"userInput"`
|
||||
Vendor string `json:"vendor"`
|
||||
Model string `json:"model"`
|
||||
ContextName string `json:"contextName"`
|
||||
PatternName string `json:"patternName"`
|
||||
StrategyName string `json:"strategyName"` // Optional strategy name
|
||||
UserInput string `json:"userInput"`
|
||||
Vendor string `json:"vendor"`
|
||||
Model string `json:"model"`
|
||||
ContextName string `json:"contextName"`
|
||||
PatternName string `json:"patternName"`
|
||||
StrategyName string `json:"strategyName"` // Optional strategy name
|
||||
Variables map[string]string `json:"variables,omitempty"` // Pattern variables
|
||||
}
|
||||
|
||||
type ChatRequest struct {
|
||||
@@ -118,9 +119,10 @@ func (h *ChatHandler) HandleChat(c *gin.Context) {
|
||||
Role: "user",
|
||||
Content: p.UserInput,
|
||||
},
|
||||
PatternName: p.PatternName,
|
||||
ContextName: p.ContextName,
|
||||
Language: request.Language, // Pass the language field
|
||||
PatternName: p.PatternName,
|
||||
ContextName: p.ContextName,
|
||||
PatternVariables: p.Variables, // Pass pattern variables
|
||||
Language: request.Language, // Pass the language field
|
||||
}
|
||||
|
||||
opts := &common.ChatOptions{
|
||||
|
||||
restapi/docs/API_VARIABLES_EXAMPLE.md (new file, 105 lines)
@@ -0,0 +1,105 @@

# REST API Pattern Variables Example

This example demonstrates how to use pattern variables in REST API calls to the `/chat` endpoint.

## Example: Using the `translate` pattern with variables

### Request

```json
{
  "prompts": [
    {
      "userInput": "Hello my name is Kayvan",
      "patternName": "translate",
      "model": "gpt-4o",
      "vendor": "openai",
      "contextName": "",
      "strategyName": "",
      "variables": {
        "lang_code": "fr"
      }
    }
  ],
  "language": "en",
  "temperature": 0.7,
  "topP": 0.9,
  "frequencyPenalty": 0.0,
  "presencePenalty": 0.0
}
```

### Pattern Content

The `translate` pattern contains:

```markdown
You are an expert translator... translate them as accurately and perfectly as possible into the language specified by its language code {{lang_code}}...

...

- Translate the document as accurately as possible keeping a 1:1 copy of the original text translated to {{lang_code}}.

{{input}}
```

### How it works

1. The pattern is loaded from `patterns/translate/system.md`
2. The `{{lang_code}}` variable is replaced with `"fr"` from the variables map
3. The `{{input}}` placeholder is replaced with `"Hello my name is Kayvan"`
4. The resulting processed pattern is sent to the AI model

### Expected Result

The AI would receive a prompt asking it to translate "Hello my name is Kayvan" to French (fr), and would respond with something like "Bonjour, je m'appelle Kayvan".

## Testing with curl

```bash
curl -X POST http://localhost:8080/api/chat \
  -H "Content-Type: application/json" \
  -d '{
    "prompts": [
      {
        "userInput": "Hello my name is Kayvan",
        "patternName": "translate",
        "model": "gpt-4o",
        "vendor": "openai",
        "variables": {
          "lang_code": "fr"
        }
      }
    ],
    "temperature": 0.7
  }'
```

## Multiple Variables Example

For patterns that use multiple variables:

```json
{
  "prompts": [
    {
      "userInput": "Analyze this business model",
      "patternName": "custom_analysis",
      "model": "gpt-4o",
      "variables": {
        "role": "expert consultant",
        "experience": "15",
        "focus_areas": "revenue, scalability, market fit",
        "output_format": "bullet points"
      }
    }
  ]
}
```

## Implementation Details

- Variables are passed in the `variables` field as a key-value map
- Variables are processed using Go's template system
- The `{{input}}` variable is automatically handled and should not be included in the variables map
- Variables support the same features as CLI variables (plugins, extensions, etc.)
@@ -15,20 +15,70 @@ type PatternsHandler struct {
|
||||
|
||||
// NewPatternsHandler creates a new PatternsHandler
|
||||
func NewPatternsHandler(r *gin.Engine, patterns *fsdb.PatternsEntity) (ret *PatternsHandler) {
|
||||
ret = &PatternsHandler{
|
||||
StorageHandler: NewStorageHandler(r, "patterns", patterns), patterns: patterns}
|
||||
// Create a storage handler but don't register any routes yet
|
||||
storageHandler := &StorageHandler[fsdb.Pattern]{storage: patterns}
|
||||
ret = &PatternsHandler{StorageHandler: storageHandler, patterns: patterns}
|
||||
|
||||
// TODO: Add custom, replacement routes here
|
||||
//r.GET("/patterns/:name", ret.Get)
|
||||
// Register routes manually - use custom Get for patterns, others from StorageHandler
|
||||
r.GET("/patterns/:name", ret.Get) // Custom method with variables support
|
||||
r.GET("/patterns/names", ret.GetNames) // From StorageHandler
|
||||
r.DELETE("/patterns/:name", ret.Delete) // From StorageHandler
|
||||
r.GET("/patterns/exists/:name", ret.Exists) // From StorageHandler
|
||||
r.PUT("/patterns/rename/:oldName/:newName", ret.Rename) // From StorageHandler
|
||||
r.POST("/patterns/:name", ret.Save) // From StorageHandler
|
||||
// Add POST route for patterns with variables in request body
|
||||
r.POST("/patterns/:name/apply", ret.ApplyPattern)
|
||||
return
|
||||
}
|
||||
|
||||
// Get handles the GET /patterns/:name route
|
||||
// Get handles the GET /patterns/:name route - returns raw pattern without variable processing
|
||||
func (h *PatternsHandler) Get(c *gin.Context) {
|
||||
name := c.Param("name")
|
||||
variables := make(map[string]string) // Assuming variables are passed somehow
|
||||
input := "" // Assuming input is passed somehow
|
||||
pattern, err := h.patterns.GetApplyVariables(name, variables, input)
|
||||
|
||||
// Get the raw pattern content without any variable processing
|
||||
content, err := h.patterns.Load(name + "/" + h.patterns.SystemPatternFile)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
}
|
||||
|
||||
// Return raw pattern in the same format as the processed patterns
|
||||
pattern := &fsdb.Pattern{
|
||||
Name: name,
|
||||
Description: "",
|
||||
Pattern: string(content),
|
||||
}
|
||||
c.JSON(http.StatusOK, pattern)
|
||||
}
|
||||
|
||||
// PatternApplyRequest represents the request body for applying a pattern
|
||||
type PatternApplyRequest struct {
|
||||
Input string `json:"input"`
|
||||
Variables map[string]string `json:"variables,omitempty"`
|
||||
}
|
||||
|
||||
// ApplyPattern handles the POST /patterns/:name/apply route
|
||||
func (h *PatternsHandler) ApplyPattern(c *gin.Context) {
|
||||
name := c.Param("name")
|
||||
|
||||
var request PatternApplyRequest
|
||||
if err := c.ShouldBindJSON(&request); err != nil {
|
||||
c.JSON(http.StatusBadRequest, gin.H{"error": err.Error()})
|
||||
return
|
||||
}
|
||||
|
||||
// Merge query parameters with request body variables (body takes precedence)
|
||||
variables := make(map[string]string)
|
||||
for key, values := range c.Request.URL.Query() {
|
||||
if len(values) > 0 {
|
||||
variables[key] = values[0]
|
||||
}
|
||||
}
|
||||
for key, value := range request.Variables {
|
||||
variables[key] = value
|
||||
}
|
||||
|
||||
pattern, err := h.patterns.GetApplyVariables(name, variables, request.Input)
|
||||
if err != nil {
|
||||
c.JSON(http.StatusInternalServerError, err.Error())
|
||||
return
|
||||
|
||||
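A hypothetical invocation of the new apply route registered above; the host, port, and any path prefix are assumptions, and note from the handler that query-string variables are merged with the body's `variables`, with the body taking precedence:

```bash
# Body value "fr" wins over the query parameter "de" (body takes precedence).
curl -X POST "http://localhost:8080/patterns/translate/apply?lang_code=de" \
  -H "Content-Type: application/json" \
  -d '{
    "input": "Hello my name is Kayvan",
    "variables": {
      "lang_code": "fr"
    }
  }'
```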
@@ -1,3 +1,3 @@
|
||||
package main
|
||||
|
||||
var version = "v1.4.205"
|
||||
var version = "v1.4.212"
|
||||
|
||||
@@ -3,8 +3,11 @@
|
||||
import Models from "./Models.svelte";
|
||||
import ModelConfig from "./ModelConfig.svelte";
|
||||
import { Select } from "$lib/components/ui/select";
|
||||
import { Input } from "$lib/components/ui/input";
|
||||
import { Label } from "$lib/components/ui/label";
|
||||
import { languageStore } from '$lib/store/language-store';
|
||||
import { strategies, selectedStrategy, fetchStrategies } from '$lib/store/strategy-store';
|
||||
import { patternVariables } from '$lib/store/pattern-store';
|
||||
import { onMount } from 'svelte';
|
||||
|
||||
const languages = [
|
||||
@@ -18,6 +21,25 @@
|
||||
{ code: 'it', name: 'Italian' }
|
||||
];
|
||||
|
||||
let variablesJsonString = '';
|
||||
|
||||
// Parse JSON string and update variables store
|
||||
function updateVariables() {
|
||||
try {
|
||||
if (variablesJsonString.trim() === '') {
|
||||
patternVariables.set({});
|
||||
} else {
|
||||
const parsed = JSON.parse(variablesJsonString);
|
||||
if (typeof parsed === 'object' && parsed !== null && !Array.isArray(parsed)) {
|
||||
patternVariables.set(parsed);
|
||||
}
|
||||
}
|
||||
} catch (e) {
|
||||
// Don't update the store if JSON is invalid - just ignore the error
|
||||
// This allows partial typing without breaking
|
||||
}
|
||||
}
|
||||
|
||||
onMount(() => {
|
||||
fetchStrategies();
|
||||
});
|
||||
@@ -33,7 +55,7 @@
|
||||
<Models />
|
||||
</div>
|
||||
<div>
|
||||
<Select
|
||||
<Select
|
||||
bind:value={$languageStore}
|
||||
class="bg-primary-800/30 border-none hover:bg-primary-800/40 transition-colors"
|
||||
>
|
||||
@@ -43,7 +65,7 @@
|
||||
</Select>
|
||||
</div>
|
||||
<div>
|
||||
<Select
|
||||
<Select
|
||||
bind:value={$selectedStrategy}
|
||||
class="bg-primary-800/30 border-none hover:bg-primary-800/40 transition-colors"
|
||||
>
|
||||
@@ -53,8 +75,19 @@
|
||||
{/each}
|
||||
</Select>
|
||||
</div>
|
||||
<div>
|
||||
<Label for="pattern-variables" class="text-xs text-white/70 mb-1 block">Pattern Variables (JSON)</Label>
|
||||
<textarea
|
||||
id="pattern-variables"
|
||||
bind:value={variablesJsonString}
|
||||
on:input={updateVariables}
|
||||
placeholder="{`{\"lang_code\": \"fr\", \"role\": \"expert\"}`}"
|
||||
class="w-full h-20 px-3 py-2 text-sm bg-primary-800/30 border-none rounded-md hover:bg-primary-800/40 transition-colors text-white placeholder-white/50 resize-none focus:ring-1 focus:ring-white/20 focus:outline-none"
|
||||
style="font-family: 'JetBrains Mono', 'Fira Code', 'Consolas', monospace;"
|
||||
></textarea>
|
||||
</div>
|
||||
</div>
|
||||
|
||||
|
||||
<!-- Right side - Model Config -->
|
||||
<div class="w-[65%]">
|
||||
<ModelConfig />
|
||||
|
||||
@@ -8,6 +8,7 @@ export interface ChatPrompt {
|
||||
model: string;
|
||||
patternName?: string;
|
||||
strategyName?: string; // Optional strategy name to prepend strategy prompt
|
||||
variables?: { [key: string]: string }; // Pattern variables
|
||||
}
|
||||
|
||||
export interface ChatConfig {
|
||||
|
||||
@@ -6,7 +6,7 @@ import type {
|
||||
} from '$lib/interfaces/chat-interface';
|
||||
import { get } from 'svelte/store';
|
||||
import { modelConfig } from '$lib/store/model-store';
|
||||
import { systemPrompt, selectedPatternName } from '$lib/store/pattern-store';
|
||||
import { systemPrompt, selectedPatternName, patternVariables } from '$lib/store/pattern-store';
|
||||
import { chatConfig } from '$lib/store/chat-config';
|
||||
import { messageStore } from '$lib/store/chat-store';
|
||||
import { languageStore } from '$lib/store/language-store';
|
||||
@@ -75,48 +75,46 @@ export class ChatService {
|
||||
|
||||
private cleanPatternOutput(content: string): string {
|
||||
// Remove markdown fence if present
|
||||
content = content.replace(/^```markdown\n/, '');
|
||||
content = content.replace(/\n```$/, '');
|
||||
|
||||
let cleaned = content.replace(/^```markdown\n/, '');
|
||||
cleaned = cleaned.replace(/\n```$/, '');
|
||||
|
||||
// Existing cleaning
|
||||
content = content.replace(/^# OUTPUT\s*\n/, '');
|
||||
content = content.replace(/^\s*\n/, '');
|
||||
content = content.replace(/\n\s*$/, '');
|
||||
content = content.replace(/^#\s+([A-Z]+):/gm, '$1:');
|
||||
content = content.replace(/^#\s+([A-Z]+)\s*$/gm, '$1');
|
||||
content = content.trim();
|
||||
content = content.replace(/\n{3,}/g, '\n\n');
|
||||
return content;
|
||||
cleaned = cleaned.replace(/^# OUTPUT\s*\n/, '');
|
||||
cleaned = cleaned.replace(/^\s*\n/, '');
|
||||
cleaned = cleaned.replace(/\n\s*$/, '');
|
||||
cleaned = cleaned.replace(/^#\s+([A-Z]+):/gm, '$1:');
|
||||
cleaned = cleaned.replace(/^#\s+([A-Z]+)\s*$/gm, '$1');
|
||||
cleaned = cleaned.trim();
|
||||
cleaned = cleaned.replace(/\n{3,}/g, '\n\n');
|
||||
return cleaned;
|
||||
}
|
||||
|
||||
|
||||
|
||||
private createMessageStream(reader: ReadableStreamDefaultReader<Uint8Array>): ReadableStream<StreamResponse> {
|
||||
let buffer = '';
|
||||
const cleanPatternOutput = this.cleanPatternOutput.bind(this);
|
||||
const language = get(languageStore);
|
||||
const validator = new LanguageValidator(language);
|
||||
|
||||
const processResponse = (response: StreamResponse) => {
|
||||
const pattern = get(selectedPatternName);
|
||||
|
||||
if (pattern) {
|
||||
response.content = cleanPatternOutput(response.content);
|
||||
// Simplified format determination - always markdown unless mermaid
|
||||
const isMermaid = [
|
||||
'graph TD', 'gantt', 'flowchart',
|
||||
'sequenceDiagram', 'classDiagram', 'stateDiagram'
|
||||
].some(starter => response.content.trim().startsWith(starter));
|
||||
|
||||
response.format = isMermaid ? 'mermaid' : 'markdown';
|
||||
}
|
||||
|
||||
if (response.type === 'content') {
|
||||
response.content = validator.enforceLanguage(response.content);
|
||||
}
|
||||
|
||||
return response;
|
||||
};
|
||||
|
||||
private createMessageStream(reader: ReadableStreamDefaultReader<Uint8Array>): ReadableStream<StreamResponse> {
|
||||
let buffer = '';
|
||||
const cleanPatternOutput = this.cleanPatternOutput.bind(this);
|
||||
const language = get(languageStore);
|
||||
const validator = new LanguageValidator(language);
|
||||
|
||||
const processResponse = (response: StreamResponse) => {
|
||||
const pattern = get(selectedPatternName);
|
||||
|
||||
if (pattern) {
|
||||
response.content = cleanPatternOutput(response.content);
|
||||
// Simplified format determination - always markdown unless mermaid
|
||||
const isMermaid = [
|
||||
'graph TD', 'gantt', 'flowchart',
|
||||
'sequenceDiagram', 'classDiagram', 'stateDiagram'
|
||||
].some(starter => response.content.trim().startsWith(starter));
|
||||
|
||||
response.format = isMermaid ? 'mermaid' : 'markdown';
|
||||
}
|
||||
|
||||
if (response.type === 'content') {
|
||||
response.content = validator.enforceLanguage(response.content);
|
||||
}
|
||||
|
||||
return response;
|
||||
};
|
||||
return new ReadableStream({
|
||||
async start(controller) {
|
||||
try {
|
||||
@@ -162,18 +160,18 @@ export class ChatService {
|
||||
}
|
||||
});
|
||||
}
|
||||
|
||||
|
||||
private createChatPrompt(userInput: string, systemPromptText?: string): ChatPrompt {
|
||||
const config = get(modelConfig);
|
||||
const language = get(languageStore);
|
||||
|
||||
const languageInstruction = language !== 'en'
|
||||
|
||||
const languageInstruction = language !== 'en'
|
||||
? `You MUST respond in ${language} language. All output must be in ${language}. `
|
||||
// ? `You MUST respond in ${language} language. ALL output, including section headers, titles, and formatting, MUST be translated into ${language}. It is CRITICAL that you translate ALL headers, such as SUMMARY, IDEAS, QUOTES, TAKEAWAYS, MAIN POINTS, etc., into ${language}. Maintain markdown formatting in the response. Do not output any English headers.`
|
||||
: '';
|
||||
|
||||
|
||||
const finalSystemPrompt = languageInstruction + (systemPromptText ?? get(systemPrompt));
|
||||
|
||||
|
||||
const finalUserInput = language !== 'en'
|
||||
? `${userInput}\n\nIMPORTANT: Respond in ${language} language only.`
|
||||
: userInput;
|
||||
@@ -183,15 +181,11 @@ export class ChatService {
|
||||
systemPrompt: finalSystemPrompt,
|
||||
model: config.model,
|
||||
patternName: get(selectedPatternName),
|
||||
strategyName: get(selectedStrategy) // Add selected strategy to prompt
|
||||
strategyName: get(selectedStrategy), // Add selected strategy to prompt
|
||||
variables: get(patternVariables) // Add pattern variables
|
||||
};
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
|
||||
public async createChatRequest(userInput: string, systemPromptText?: string, isPattern: boolean = false): Promise<ChatRequest> {
|
||||
const prompt = this.createChatPrompt(userInput, systemPromptText);
|
||||
const config = get(chatConfig);
|
||||
@@ -221,16 +215,16 @@ export class ChatService {
|
||||
onError: (error: Error) => void
|
||||
): Promise<void> {
|
||||
const reader = stream.getReader();
|
||||
|
||||
|
||||
try {
|
||||
while (true) {
|
||||
const { done, value } = await reader.read();
|
||||
if (done) break;
|
||||
|
||||
|
||||
if (value.type === 'error') {
|
||||
throw new ChatError(value.content, 'STREAM_CONTENT_ERROR');
|
||||
}
|
||||
|
||||
|
||||
if (value.type === 'content') {
|
||||
onContent(value.content, value);
|
||||
}
|
||||
@@ -239,11 +233,7 @@ export class ChatService {
|
||||
onError(error instanceof ChatError ? error : new ChatError('Stream processing error', 'STREAM_ERROR', error));
|
||||
} finally {
|
||||
reader.releaseLock();
|
||||
|
||||
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
|
||||
|
||||
}
|
||||
|
||||
@@ -15,11 +15,11 @@ export const patterns = derived(
|
||||
return $allPatterns.filter(p => {
|
||||
// Keep all patterns if no language is selected
|
||||
if (!$language) return true;
|
||||
|
||||
|
||||
// Check if pattern has a language prefix (e.g., en_, fr_)
|
||||
const match = p.Name.match(/^([a-z]{2})_/);
|
||||
if (!match) return true; // Keep patterns without language prefix
|
||||
|
||||
|
||||
// Only filter out patterns that have a different language prefix
|
||||
const patternLang = match[1];
|
||||
return patternLang === $language;
|
||||
@@ -30,6 +30,9 @@ export const patterns = derived(
|
||||
export const systemPrompt = writable<string>('');
|
||||
export const selectedPatternName = writable<string>('');
|
||||
|
||||
// Pattern variables store
|
||||
export const patternVariables = writable<Record<string, string>>({});
|
||||
|
||||
export const setSystemPrompt = (prompt: string) => {
|
||||
console.log('Setting system prompt:', prompt);
|
||||
systemPrompt.set(prompt);
|
||||
@@ -60,13 +63,13 @@ export const patternAPI = {
|
||||
const patternResponse = await fetch(`/api/patterns/${pattern}`);
|
||||
const patternData = await patternResponse.json();
|
||||
console.log(`Pattern ${pattern} content length:`, patternData.Pattern?.length || 0);
|
||||
|
||||
|
||||
// Find matching description from JSON
|
||||
const desc = descriptions.find(d => d.patternName === pattern);
|
||||
if (!desc) {
|
||||
console.warn(`No description found for pattern: ${pattern}`);
|
||||
}
|
||||
|
||||
|
||||
return {
|
||||
Name: pattern,
|
||||
Description: desc?.description || pattern.charAt(0).toUpperCase() + pattern.slice(1),
|
||||
|
||||