Mirror of https://github.com/danielmiessler/Fabric.git, synced 2026-01-09 14:28:01 -05:00
feat: add comprehensive internationalization support with English and Spanish locales

- Replace hardcoded strings with i18n.T translations
- Add en and es JSON locale files
- Implement custom translated help system
- Enable language detection from CLI args
- Add locale download capability
- Localize error messages throughout codebase
- Support TTS and notification translations
@@ -72,6 +72,7 @@ Below are the **new features and capabilities** we've added (newest first):

### Recent Major Features

- [v1.4.309](https://github.com/danielmiessler/fabric/releases/tag/v1.4.309) (Sep 9, 2025) — **Comprehensive internationalization support**: Includes English and Spanish locale files.
- [v1.4.303](https://github.com/danielmiessler/fabric/releases/tag/v1.4.303) (Aug 29, 2025) — **New Binary Releases**: Linux ARM and Windows ARM targets. You can run Fabric on the Raspberry Pi and on your Windows Surface!
- [v1.4.294](https://github.com/danielmiessler/fabric/releases/tag/v1.4.294) (Aug 20, 2025) — **Venice AI Support**: Added the Venice AI provider. Venice is a privacy-first, open-source AI provider. See their ["About Venice"](https://docs.venice.ai/overview/about-venice) page for details.
- [v1.4.291](https://github.com/danielmiessler/fabric/releases/tag/v1.4.291) (Aug 18, 2025) — **Speech To Text**: Add OpenAI speech-to-text support with `--transcribe-file`, `--transcribe-model`, and `--split-media-file` flags.
7
cmd/generate_changelog/incoming/1756.txt
Normal file
@@ -0,0 +1,7 @@
### PR [#1756](https://github.com/danielmiessler/Fabric/pull/1756) by [ksylvan](https://github.com/ksylvan): Add Internationalization Support with Custom Help System

- Add comprehensive internationalization support with English and Spanish locales
- Replace hardcoded strings with i18n.T translations and add en and es JSON locale files (see the sketch after this list)
- Implement custom translated help system with language detection from CLI args
- Add locale download capability and localize error messages throughout codebase
- Support TTS and notification translations
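The bullets above describe one mechanical pattern applied across the codebase: each hardcoded English string becomes a message ID looked up through `i18n.T`, with the English and Spanish texts stored in JSON locale files. The sketch below is a minimal, self-contained illustration of that pattern; the `T` stub and the inline `locales` map are stand-ins for the real `internal/i18n` package (which builds on go-i18n with embedded and downloadable locale files), not its actual API.

```go
// Minimal sketch of the "translate, then format" pattern this PR applies.
// The T stub and the locales map are illustrative stand-ins for the real
// github.com/danielmiessler/fabric/internal/i18n package.
package main

import (
	"fmt"
	"os"
)

// Message tables keyed by language code, mirroring the shape of the
// en.json / es.json files added in this diff.
var locales = map[string]map[string]string{
	"en": {"config_file_not_found": "config file not found: %s"},
	"es": {"config_file_not_found": "archivo de configuración no encontrado: %s"},
}

// lang would normally be detected from --language/-g or LC_ALL/LC_MESSAGES/LANG.
var lang = "es"

// T resolves a message ID in the active locale, falling back to English.
func T(id string) string {
	if msg, ok := locales[lang][id]; ok {
		return msg
	}
	return locales["en"][id]
}

func main() {
	path := "~/.config/fabric/config.yaml"
	// Before: err := fmt.Errorf("config file not found: %s", path)
	// After:  format the translated template first, then wrap it in an error.
	err := fmt.Errorf("%s", fmt.Sprintf(T("config_file_not_found"), path))
	fmt.Fprintln(os.Stderr, err) // archivo de configuración no encontrado: ~/.config/fabric/config.yaml
}
```

Wrapping the already-formatted string in `fmt.Errorf("%s", ...)` mirrors how the hunks below construct errors from translated templates.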
@@ -9,6 +9,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
"github.com/danielmiessler/fabric/internal/tools/notifications"
|
||||
@@ -58,12 +59,12 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
isTTSModel := isTTSModel(currentFlags.Model)
|
||||
|
||||
if isTTSModel && !isAudioOutput {
|
||||
err = fmt.Errorf("TTS model '%s' requires audio output. Please specify an audio output file with -o flag (e.g., -o output.wav)", currentFlags.Model)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("tts_model_requires_audio_output"), currentFlags.Model))
|
||||
return
|
||||
}
|
||||
|
||||
if isAudioOutput && !isTTSModel {
|
||||
err = fmt.Errorf("audio output file '%s' specified but model '%s' is not a TTS model. Please use a TTS model like gemini-2.5-flash-preview-tts", currentFlags.Output, currentFlags.Model)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("audio_output_file_specified_but_not_tts_model"), currentFlags.Output, currentFlags.Model))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -75,7 +76,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
outputFile += ".wav"
|
||||
}
|
||||
if _, err = os.Stat(outputFile); err == nil {
|
||||
err = fmt.Errorf("file %s already exists. Please choose a different filename or remove the existing file", outputFile)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("file_already_exists_choose_different"), outputFile))
|
||||
return
|
||||
}
|
||||
}
|
||||
@@ -95,7 +96,7 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
if !currentFlags.Stream || currentFlags.SuppressThink {
|
||||
// For TTS models with audio output, show a user-friendly message instead of raw data
|
||||
if isTTSModel && isAudioOutput && strings.HasPrefix(result, "FABRIC_AUDIO_DATA:") {
|
||||
fmt.Printf("TTS audio generated successfully and saved to: %s\n", currentFlags.Output)
|
||||
fmt.Printf(i18n.T("tts_audio_generated_successfully"), currentFlags.Output)
|
||||
} else {
|
||||
// print the result if it was not streamed already or suppress-think disabled streaming output
|
||||
fmt.Println(result)
|
||||
@@ -149,20 +150,20 @@ func handleChatProcessing(currentFlags *Flags, registry *core.PluginRegistry, me
|
||||
// not grapheme clusters. As a result, complex emoji or accented characters with multiple combining
|
||||
// characters may be truncated improperly. This is a limitation of the current implementation.
|
||||
func sendNotification(options *domain.ChatOptions, patternName, result string) error {
|
||||
title := "Fabric Command Complete"
|
||||
title := i18n.T("fabric_command_complete")
|
||||
if patternName != "" {
|
||||
title = fmt.Sprintf("Fabric: %s Complete", patternName)
|
||||
title = fmt.Sprintf(i18n.T("fabric_command_complete_with_pattern"), patternName)
|
||||
}
|
||||
|
||||
// Limit message length for notification display (counts Unicode code points)
|
||||
message := "Command completed successfully"
|
||||
message := i18n.T("command_completed_successfully")
|
||||
if result != "" {
|
||||
maxLength := 100
|
||||
runes := []rune(result)
|
||||
if len(runes) > maxLength {
|
||||
message = fmt.Sprintf("Output: %s...", string(runes[:maxLength]))
|
||||
message = fmt.Sprintf(i18n.T("output_truncated"), string(runes[:maxLength]))
|
||||
} else {
|
||||
message = fmt.Sprintf("Output: %s", result)
|
||||
message = fmt.Sprintf(i18n.T("output_full"), result)
|
||||
}
|
||||
// Clean up newlines for notification display
|
||||
message = strings.ReplaceAll(message, "\n", " ")
|
||||
@@ -184,7 +185,7 @@ func sendNotification(options *domain.ChatOptions, patternName, result string) e
|
||||
// Use built-in notification system
|
||||
notificationManager := notifications.NewNotificationManager()
|
||||
if !notificationManager.IsAvailable() {
|
||||
return fmt.Errorf("no notification system available")
|
||||
return fmt.Errorf("%s", i18n.T("no_notification_system_available"))
|
||||
}
|
||||
|
||||
return notificationManager.Send(title, message)
|
||||
|
||||
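As the comment in `sendNotification` notes, truncation operates on runes (Unicode code points), not grapheme clusters. The standalone snippet below, an illustration rather than code from this PR, shows how slicing a `[]rune` can separate a combining accent from its base letter.

```go
// Illustration of the rune-vs-grapheme truncation caveat: slicing []rune
// counts code points, so a combining mark can be cut off its base letter.
package main

import "fmt"

func main() {
	s := "cafe\u0301!" // "café!" with the accent as a combining character (6 runes)
	runes := []rune(s)
	fmt.Println(string(runes[:5])) // "café": combining accent kept
	fmt.Println(string(runes[:4])) // "cafe": accent truncated away
}
```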
@@ -13,6 +13,7 @@ import (
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/chat"
|
||||
"github.com/danielmiessler/fabric/internal/domain"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
"github.com/jessevdk/go-flags"
|
||||
@@ -146,9 +147,15 @@ func Init() (ret *Flags, err error) {
|
||||
|
||||
// Parse CLI flags first
|
||||
ret = &Flags{}
|
||||
parser := flags.NewParser(ret, flags.Default)
|
||||
parser := flags.NewParser(ret, flags.HelpFlag|flags.PassDoubleDash)
|
||||
|
||||
var args []string
|
||||
if args, err = parser.Parse(); err != nil {
|
||||
// Check if this is a help request and handle it with our custom help
|
||||
if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
|
||||
CustomHelpHandler(parser, os.Stdout)
|
||||
os.Exit(0)
|
||||
}
|
||||
return
|
||||
}
|
||||
debuglog.SetLevel(debuglog.LevelFromInt(ret.Debug))
|
||||
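The hunk above swaps `flags.Default` for `flags.HelpFlag|flags.PassDoubleDash` so that go-flags surfaces `-h/--help` as an `ErrHelp` error without printing its built-in English help, letting `CustomHelpHandler` render the translated help instead. A minimal, self-contained sketch of that interception follows; the `Options` struct and `printTranslatedHelp` are illustrative stand-ins, not Fabric's real types.

```go
// Sketch of intercepting go-flags' help request so a custom (translated)
// help printer can run instead of the library's default output.
package main

import (
	"fmt"
	"os"

	"github.com/jessevdk/go-flags"
)

type Options struct {
	Language string `short:"g" long:"language" description:"Language code, e.g. en or es"`
}

// printTranslatedHelp stands in for CustomHelpHandler, which walks the real
// Flags struct with reflection and prints i18n.T() descriptions.
func printTranslatedHelp(p *flags.Parser) {
	fmt.Printf("Usage:\n  %s [OPTIONS]\n", p.Name)
}

func main() {
	opts := &Options{}
	// HelpFlag registers -h/--help but leaves printing to us; PassDoubleDash
	// stops option parsing at "--". Neither prints errors automatically.
	parser := flags.NewParser(opts, flags.HelpFlag|flags.PassDoubleDash)
	if _, err := parser.Parse(); err != nil {
		if flagsErr, ok := err.(*flags.Error); ok && flagsErr.Type == flags.ErrHelp {
			printTranslatedHelp(parser)
			os.Exit(0)
		}
		fmt.Fprintln(os.Stderr, err)
		os.Exit(1)
	}
	fmt.Println("language:", opts.Language)
}
```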
@@ -275,30 +282,30 @@ func assignWithConversion(targetField, sourceField reflect.Value) error {
|
||||
return nil
|
||||
}
|
||||
}
|
||||
return fmt.Errorf("cannot convert string %q to %v", str, targetField.Kind())
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("cannot_convert_string"), str, targetField.Kind()))
|
||||
}
|
||||
|
||||
return fmt.Errorf("unsupported conversion from %v to %v", sourceField.Kind(), targetField.Kind())
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("unsupported_conversion"), sourceField.Kind(), targetField.Kind()))
|
||||
}
|
||||
|
||||
func loadYAMLConfig(configPath string) (*Flags, error) {
|
||||
absPath, err := util.GetAbsolutePath(configPath)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("invalid config path: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_config_path"), err))
|
||||
}
|
||||
|
||||
data, err := os.ReadFile(absPath)
|
||||
if err != nil {
|
||||
if os.IsNotExist(err) {
|
||||
return nil, fmt.Errorf("config file not found: %s", absPath)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("config_file_not_found"), absPath))
|
||||
}
|
||||
return nil, fmt.Errorf("error reading config file: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_reading_config_file"), err))
|
||||
}
|
||||
|
||||
// Use the existing Flags struct for YAML unmarshal
|
||||
config := &Flags{}
|
||||
if err := yaml.Unmarshal(data, config); err != nil {
|
||||
return nil, fmt.Errorf("error parsing config file: %w", err)
|
||||
return nil, fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_parsing_config_file"), err))
|
||||
}
|
||||
|
||||
debuglog.Debug(debuglog.Detailed, "Config: %v\n", config)
|
||||
@@ -316,7 +323,7 @@ func readStdin() (ret string, err error) {
|
||||
sb.WriteString(line)
|
||||
break
|
||||
}
|
||||
err = fmt.Errorf("error reading piped message from stdin: %w", readErr)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_reading_piped_message"), readErr))
|
||||
return
|
||||
} else {
|
||||
sb.WriteString(line)
|
||||
@@ -334,7 +341,7 @@ func validateImageFile(imagePath string) error {
|
||||
|
||||
// Check if file already exists
|
||||
if _, err := os.Stat(imagePath); err == nil {
|
||||
return fmt.Errorf("image file already exists: %s", imagePath)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_file_already_exists"), imagePath))
|
||||
}
|
||||
|
||||
// Check file extension
|
||||
@@ -347,7 +354,7 @@ func validateImageFile(imagePath string) error {
|
||||
}
|
||||
}
|
||||
|
||||
return fmt.Errorf("invalid image file extension '%s'. Supported formats: .png, .jpeg, .jpg, .webp", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_file_extension"), ext))
|
||||
}
|
||||
|
||||
// validateImageParameters validates image generation parameters
|
||||
@@ -355,7 +362,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
if imagePath == "" {
|
||||
// Check if any image parameters are specified without --image-file
|
||||
if size != "" || quality != "" || background != "" || compression != 0 {
|
||||
return fmt.Errorf("image parameters (--image-size, --image-quality, --image-background, --image-compression) can only be used with --image-file")
|
||||
return fmt.Errorf("%s", i18n.T("image_parameters_require_image_file"))
|
||||
}
|
||||
return nil
|
||||
}
|
||||
@@ -371,7 +378,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image size '%s'. Supported sizes: 1024x1024, 1536x1024, 1024x1536, auto", size)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_size"), size))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -386,7 +393,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image quality '%s'. Supported qualities: low, medium, high, auto", quality)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_quality"), quality))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -401,7 +408,7 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
}
|
||||
}
|
||||
if !valid {
|
||||
return fmt.Errorf("invalid image background '%s'. Supported backgrounds: opaque, transparent", background)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("invalid_image_background"), background))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -411,17 +418,17 @@ func validateImageParameters(imagePath, size, quality, background string, compre
|
||||
// Validate compression (only for jpeg/webp)
|
||||
if compression != 0 { // 0 means not set
|
||||
if ext != ".jpg" && ext != ".jpeg" && ext != ".webp" {
|
||||
return fmt.Errorf("image compression can only be used with JPEG and WebP formats, not %s", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_compression_jpeg_webp_only"), ext))
|
||||
}
|
||||
if compression < 0 || compression > 100 {
|
||||
return fmt.Errorf("image compression must be between 0 and 100, got %d", compression)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("image_compression_range_error"), compression))
|
||||
}
|
||||
}
|
||||
|
||||
// Validate background transparency (only for png/webp)
|
||||
if background == "transparent" {
|
||||
if ext != ".png" && ext != ".webp" {
|
||||
return fmt.Errorf("transparent background can only be used with PNG and WebP formats, not %s", ext)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("transparent_background_png_webp_only"), ext))
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
286
internal/cli/help.go
Normal file
@@ -0,0 +1,286 @@
|
||||
package cli
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"reflect"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/jessevdk/go-flags"
|
||||
)
|
||||
|
||||
// flagDescriptionMap maps flag names to their i18n keys
|
||||
var flagDescriptionMap = map[string]string{
|
||||
"pattern": "choose_pattern_from_available",
|
||||
"variable": "pattern_variables_help",
|
||||
"context": "choose_context_from_available",
|
||||
"session": "choose_session_from_available",
|
||||
"attachment": "attachment_path_or_url_help",
|
||||
"setup": "run_setup_for_reconfigurable_parts",
|
||||
"temperature": "set_temperature",
|
||||
"topp": "set_top_p",
|
||||
"stream": "stream_help",
|
||||
"presencepenalty": "set_presence_penalty",
|
||||
"raw": "use_model_defaults_raw_help",
|
||||
"frequencypenalty": "set_frequency_penalty",
|
||||
"listpatterns": "list_all_patterns",
|
||||
"listmodels": "list_all_available_models",
|
||||
"listcontexts": "list_all_contexts",
|
||||
"listsessions": "list_all_sessions",
|
||||
"updatepatterns": "update_patterns",
|
||||
"copy": "copy_to_clipboard",
|
||||
"model": "choose_model",
|
||||
"vendor": "specify_vendor_for_model",
|
||||
"modelContextLength": "model_context_length_ollama",
|
||||
"output": "output_to_file",
|
||||
"output-session": "output_entire_session",
|
||||
"latest": "number_of_latest_patterns",
|
||||
"changeDefaultModel": "change_default_model",
|
||||
"youtube": "youtube_url_help",
|
||||
"playlist": "prefer_playlist_over_video",
|
||||
"transcript": "grab_transcript_from_youtube",
|
||||
"transcript-with-timestamps": "grab_transcript_with_timestamps",
|
||||
"comments": "grab_comments_from_youtube",
|
||||
"metadata": "output_video_metadata",
|
||||
"yt-dlp-args": "additional_yt_dlp_args",
|
||||
"language": "specify_language_code",
|
||||
"scrape_url": "scrape_website_url",
|
||||
"scrape_question": "search_question_jina",
|
||||
"seed": "seed_for_lmm_generation",
|
||||
"wipecontext": "wipe_context",
|
||||
"wipesession": "wipe_session",
|
||||
"printcontext": "print_context",
|
||||
"printsession": "print_session",
|
||||
"readability": "convert_html_readability",
|
||||
"input-has-vars": "apply_variables_to_input",
|
||||
"no-variable-replacement": "disable_pattern_variable_replacement",
|
||||
"dry-run": "show_dry_run",
|
||||
"serve": "serve_fabric_rest_api",
|
||||
"serveOllama": "serve_fabric_api_ollama_endpoints",
|
||||
"address": "address_to_bind_rest_api",
|
||||
"api-key": "api_key_secure_server_routes",
|
||||
"config": "path_to_yaml_config",
|
||||
"version": "print_current_version",
|
||||
"listextensions": "list_all_registered_extensions",
|
||||
"addextension": "register_new_extension",
|
||||
"rmextension": "remove_registered_extension",
|
||||
"strategy": "choose_strategy_from_available",
|
||||
"liststrategies": "list_all_strategies",
|
||||
"listvendors": "list_all_vendors",
|
||||
"shell-complete-list": "output_raw_list_shell_completion",
|
||||
"search": "enable_web_search_tool",
|
||||
"search-location": "set_location_web_search",
|
||||
"image-file": "save_generated_image_to_file",
|
||||
"image-size": "image_dimensions_help",
|
||||
"image-quality": "image_quality_help",
|
||||
"image-compression": "compression_level_jpeg_webp",
|
||||
"image-background": "background_type_help",
|
||||
"suppress-think": "suppress_thinking_tags",
|
||||
"think-start-tag": "start_tag_thinking_sections",
|
||||
"think-end-tag": "end_tag_thinking_sections",
|
||||
"disable-responses-api": "disable_openai_responses_api",
|
||||
"transcribe-file": "audio_video_file_transcribe",
|
||||
"transcribe-model": "model_for_transcription",
|
||||
"split-media-file": "split_media_files_ffmpeg",
|
||||
"voice": "tts_voice_name",
|
||||
"list-gemini-voices": "list_gemini_tts_voices",
|
||||
"list-transcription-models": "list_transcription_models",
|
||||
"notification": "send_desktop_notification",
|
||||
"notification-command": "custom_notification_command",
|
||||
"thinking": "set_reasoning_thinking_level",
|
||||
"debug": "set_debug_level",
|
||||
}
|
||||
|
||||
// TranslatedHelpWriter provides custom help output with translated descriptions
|
||||
type TranslatedHelpWriter struct {
|
||||
parser *flags.Parser
|
||||
writer io.Writer
|
||||
}
|
||||
|
||||
// NewTranslatedHelpWriter creates a new help writer with translations
|
||||
func NewTranslatedHelpWriter(parser *flags.Parser, writer io.Writer) *TranslatedHelpWriter {
|
||||
return &TranslatedHelpWriter{
|
||||
parser: parser,
|
||||
writer: writer,
|
||||
}
|
||||
}
|
||||
|
||||
// WriteHelp writes the help output with translated flag descriptions
|
||||
func (h *TranslatedHelpWriter) WriteHelp() {
|
||||
fmt.Fprintf(h.writer, "%s\n", i18n.T("usage_header"))
|
||||
fmt.Fprintf(h.writer, " %s %s\n\n", h.parser.Name, i18n.T("options_placeholder"))
|
||||
|
||||
fmt.Fprintf(h.writer, "%s\n", i18n.T("application_options_header"))
|
||||
h.writeAllFlags()
|
||||
|
||||
fmt.Fprintf(h.writer, "\n%s\n", i18n.T("help_options_header"))
|
||||
fmt.Fprintf(h.writer, " -h, --help %s\n", i18n.T("help_message"))
|
||||
}
|
||||
|
||||
// getTranslatedDescription gets the translated description for a flag
|
||||
func (h *TranslatedHelpWriter) getTranslatedDescription(flagName string) string {
|
||||
if i18nKey, exists := flagDescriptionMap[flagName]; exists {
|
||||
return i18n.T(i18nKey)
|
||||
}
|
||||
|
||||
// Fallback 1: Try to get original description from struct tag
|
||||
if desc := h.getOriginalDescription(flagName); desc != "" {
|
||||
return desc
|
||||
}
|
||||
|
||||
// Fallback 2: Provide a user-friendly default message
|
||||
return i18n.T("no_description_available")
|
||||
}
|
||||
|
||||
// getOriginalDescription retrieves the original description from struct tags
|
||||
func (h *TranslatedHelpWriter) getOriginalDescription(flagName string) string {
|
||||
flags := &Flags{}
|
||||
flagsType := reflect.TypeOf(flags).Elem()
|
||||
|
||||
for i := 0; i < flagsType.NumField(); i++ {
|
||||
field := flagsType.Field(i)
|
||||
longTag := field.Tag.Get("long")
|
||||
|
||||
if longTag == flagName {
|
||||
if description := field.Tag.Get("description"); description != "" {
|
||||
return description
|
||||
}
|
||||
break
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// CustomHelpHandler handles help output with translations
|
||||
func CustomHelpHandler(parser *flags.Parser, writer io.Writer) {
|
||||
// Initialize i18n system with detected language if not already initialized
|
||||
ensureI18nInitialized()
|
||||
|
||||
helpWriter := NewTranslatedHelpWriter(parser, writer)
|
||||
helpWriter.WriteHelp()
|
||||
}
|
||||
|
||||
// ensureI18nInitialized initializes the i18n system if not already done
|
||||
func ensureI18nInitialized() {
|
||||
// Try to detect language from command line args or environment
|
||||
lang := detectLanguageFromArgs()
|
||||
if lang == "" {
|
||||
// Try to detect from environment variables
|
||||
lang = detectLanguageFromEnv()
|
||||
}
|
||||
|
||||
// Initialize i18n with detected language (or empty for system default)
|
||||
i18n.Init(lang)
|
||||
}
|
||||
|
||||
// detectLanguageFromArgs looks for --language/-g flag in os.Args
|
||||
func detectLanguageFromArgs() string {
|
||||
args := os.Args[1:]
|
||||
for i, arg := range args {
|
||||
if arg == "--language" || arg == "-g" {
|
||||
if i+1 < len(args) {
|
||||
return args[i+1]
|
||||
}
|
||||
} else if strings.HasPrefix(arg, "--language=") {
|
||||
return strings.TrimPrefix(arg, "--language=")
|
||||
} else if strings.HasPrefix(arg, "-g=") {
|
||||
return strings.TrimPrefix(arg, "-g=")
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// detectLanguageFromEnv detects language from environment variables
|
||||
func detectLanguageFromEnv() string {
|
||||
// Check standard locale environment variables
|
||||
envVars := []string{"LC_ALL", "LC_MESSAGES", "LANG"}
|
||||
for _, envVar := range envVars {
|
||||
if value := os.Getenv(envVar); value != "" {
|
||||
// Extract language code from locale (e.g., "es_ES.UTF-8" -> "es")
|
||||
if strings.Contains(value, "_") {
|
||||
return strings.Split(value, "_")[0]
|
||||
}
|
||||
if value != "C" && value != "POSIX" {
|
||||
return value
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
// writeAllFlags writes all flags with translated descriptions
|
||||
func (h *TranslatedHelpWriter) writeAllFlags() {
|
||||
// Use direct reflection on the Flags struct to get all flag definitions
|
||||
flags := &Flags{}
|
||||
flagsType := reflect.TypeOf(flags).Elem()
|
||||
|
||||
for i := 0; i < flagsType.NumField(); i++ {
|
||||
field := flagsType.Field(i)
|
||||
|
||||
shortTag := field.Tag.Get("short")
|
||||
longTag := field.Tag.Get("long")
|
||||
defaultTag := field.Tag.Get("default")
|
||||
|
||||
if longTag == "" {
|
||||
continue // Skip fields without long tags
|
||||
}
|
||||
|
||||
// Get translated description
|
||||
description := h.getTranslatedDescription(longTag)
|
||||
|
||||
// Format the flag line
|
||||
var flagLine strings.Builder
|
||||
flagLine.WriteString(" ")
|
||||
|
||||
if shortTag != "" {
|
||||
flagLine.WriteString(fmt.Sprintf("-%s, ", shortTag))
|
||||
}
|
||||
|
||||
flagLine.WriteString(fmt.Sprintf("--%s", longTag))
|
||||
|
||||
// Add parameter indicator for non-boolean flags
|
||||
isBoolFlag := field.Type.Kind() == reflect.Bool ||
|
||||
strings.HasSuffix(longTag, "patterns") ||
|
||||
strings.HasSuffix(longTag, "models") ||
|
||||
strings.HasSuffix(longTag, "contexts") ||
|
||||
strings.HasSuffix(longTag, "sessions") ||
|
||||
strings.HasSuffix(longTag, "extensions") ||
|
||||
strings.HasSuffix(longTag, "strategies") ||
|
||||
strings.HasSuffix(longTag, "vendors") ||
|
||||
strings.HasSuffix(longTag, "voices") ||
|
||||
longTag == "setup" || longTag == "stream" || longTag == "raw" ||
|
||||
longTag == "copy" || longTag == "updatepatterns" ||
|
||||
longTag == "output-session" || longTag == "changeDefaultModel" ||
|
||||
longTag == "playlist" || longTag == "transcript" ||
|
||||
longTag == "transcript-with-timestamps" || longTag == "comments" ||
|
||||
longTag == "metadata" || longTag == "readability" ||
|
||||
longTag == "input-has-vars" || longTag == "no-variable-replacement" ||
|
||||
longTag == "dry-run" || longTag == "serve" || longTag == "serveOllama" ||
|
||||
longTag == "version" || longTag == "shell-complete-list" ||
|
||||
longTag == "search" || longTag == "suppress-think" ||
|
||||
longTag == "disable-responses-api" || longTag == "split-media-file" ||
|
||||
longTag == "notification"
|
||||
|
||||
if !isBoolFlag {
|
||||
flagLine.WriteString("=")
|
||||
}
|
||||
|
||||
// Pad to align descriptions
|
||||
flagStr := flagLine.String()
|
||||
padding := 34 - len(flagStr)
|
||||
if padding < 2 {
|
||||
padding = 2
|
||||
}
|
||||
|
||||
fmt.Fprintf(h.writer, "%s%s%s", flagStr, strings.Repeat(" ", padding), description)
|
||||
|
||||
// Add default value if present
|
||||
if defaultTag != "" && defaultTag != "0" && defaultTag != "false" {
|
||||
fmt.Fprintf(h.writer, " (default: %s)", defaultTag)
|
||||
}
|
||||
|
||||
fmt.Fprintf(h.writer, "\n")
|
||||
}
|
||||
}
|
||||
@@ -6,6 +6,7 @@ import (
|
||||
"path/filepath"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
)
|
||||
|
||||
@@ -36,20 +37,20 @@ func initializeFabric() (registry *core.PluginRegistry, err error) {
|
||||
func ensureEnvFile() (err error) {
|
||||
var homedir string
|
||||
if homedir, err = os.UserHomeDir(); err != nil {
|
||||
return fmt.Errorf("could not determine user home directory: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_determine_home_dir"), err))
|
||||
}
|
||||
configDir := filepath.Join(homedir, ".config", "fabric")
|
||||
envPath := filepath.Join(configDir, ".env")
|
||||
|
||||
if _, statErr := os.Stat(envPath); statErr != nil {
|
||||
if !os.IsNotExist(statErr) {
|
||||
return fmt.Errorf("could not stat .env file: %w", statErr)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_stat_env_file"), statErr))
|
||||
}
|
||||
if err = os.MkdirAll(configDir, ConfigDirPerms); err != nil {
|
||||
return fmt.Errorf("could not create config directory: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_create_config_dir"), err))
|
||||
}
|
||||
if err = os.WriteFile(envPath, []byte{}, EnvFilePerms); err != nil {
|
||||
return fmt.Errorf("could not create .env file: %w", err)
|
||||
return fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_create_env_file"), err))
|
||||
}
|
||||
}
|
||||
return
|
||||
|
||||
@@ -8,6 +8,7 @@ import (
|
||||
openai "github.com/openai/openai-go"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/gemini"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/db/fsdb"
|
||||
@@ -93,7 +94,7 @@ func listTranscriptionModels(shellComplete bool) {
|
||||
fmt.Println(model)
|
||||
}
|
||||
} else {
|
||||
fmt.Println("Available transcription models:")
|
||||
fmt.Println(i18n.T("available_transcription_models"))
|
||||
for _, model := range models {
|
||||
fmt.Printf(" %s\n", model)
|
||||
}
|
||||
|
||||
@@ -7,29 +7,30 @@ import (
|
||||
"strings"
|
||||
|
||||
"github.com/atotto/clipboard"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
)
|
||||
|
||||
func CopyToClipboard(message string) (err error) {
|
||||
if err = clipboard.WriteAll(message); err != nil {
|
||||
err = fmt.Errorf("could not copy to clipboard: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("could_not_copy_to_clipboard"), err))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func CreateOutputFile(message string, fileName string) (err error) {
|
||||
if _, err = os.Stat(fileName); err == nil {
|
||||
err = fmt.Errorf("file %s already exists, not overwriting. Rename the existing file or choose a different name", fileName)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("file_already_exists_not_overwriting"), fileName))
|
||||
return
|
||||
}
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_creating_file"), err))
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
if _, err = file.WriteString(message); err != nil {
|
||||
err = fmt.Errorf("error writing to file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_writing_to_file"), err))
|
||||
} else {
|
||||
debuglog.Log("\n\n[Output also written to %s]\n", fileName)
|
||||
}
|
||||
@@ -46,13 +47,13 @@ func CreateAudioOutputFile(audioData []byte, fileName string) (err error) {
|
||||
// File existence check is now done in the CLI layer before TTS generation
|
||||
var file *os.File
|
||||
if file, err = os.Create(fileName); err != nil {
|
||||
err = fmt.Errorf("error creating audio file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_creating_audio_file"), err))
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
if _, err = file.Write(audioData); err != nil {
|
||||
err = fmt.Errorf("error writing audio data to file: %v", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_writing_audio_data"), err))
|
||||
}
|
||||
// No redundant output message here - the CLI layer handles success messaging
|
||||
return
|
||||
|
||||
@@ -4,6 +4,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/tools/youtube"
|
||||
)
|
||||
|
||||
@@ -11,7 +12,7 @@ import (
|
||||
func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (messageTools string, err error) {
|
||||
if currentFlags.YouTube != "" {
|
||||
if !registry.YouTube.IsConfigured() {
|
||||
err = fmt.Errorf("YouTube is not configured, please run the setup procedure")
|
||||
err = fmt.Errorf("%s", i18n.T("youtube_not_configured"))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -25,7 +26,7 @@ func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (m
|
||||
} else {
|
||||
var videos []*youtube.VideoMeta
|
||||
if videos, err = registry.YouTube.FetchPlaylistVideos(playlistId); err != nil {
|
||||
err = fmt.Errorf("error fetching playlist videos: %w", err)
|
||||
err = fmt.Errorf("%s", fmt.Sprintf(i18n.T("error_fetching_playlist_videos"), err))
|
||||
return
|
||||
}
|
||||
|
||||
@@ -58,7 +59,7 @@ func handleToolProcessing(currentFlags *Flags, registry *core.PluginRegistry) (m
|
||||
|
||||
if currentFlags.ScrapeURL != "" || currentFlags.ScrapeQuestion != "" {
|
||||
if !registry.Jina.IsConfigured() {
|
||||
err = fmt.Errorf("scraping functionality is not configured. Please set up Jina to enable scraping")
|
||||
err = fmt.Errorf("%s", i18n.T("scraping_not_configured"))
|
||||
return
|
||||
}
|
||||
// Check if the scrape_url flag is set and call ScrapeURL
|
||||
|
||||
@@ -5,6 +5,7 @@ import (
|
||||
"fmt"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/core"
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
)
|
||||
|
||||
type transcriber interface {
|
||||
@@ -18,15 +19,15 @@ func handleTranscription(flags *Flags, registry *core.PluginRegistry) (message s
|
||||
}
|
||||
vendor, ok := registry.VendorManager.VendorsByName[vendorName]
|
||||
if !ok {
|
||||
return "", fmt.Errorf("vendor %s not configured", vendorName)
|
||||
return "", fmt.Errorf("%s", fmt.Sprintf(i18n.T("vendor_not_configured"), vendorName))
|
||||
}
|
||||
tr, ok := vendor.(transcriber)
|
||||
if !ok {
|
||||
return "", fmt.Errorf("vendor %s does not support audio transcription", vendorName)
|
||||
return "", fmt.Errorf("%s", fmt.Sprintf(i18n.T("vendor_no_transcription_support"), vendorName))
|
||||
}
|
||||
model := flags.TranscribeModel
|
||||
if model == "" {
|
||||
return "", fmt.Errorf("transcription model is required (use --transcribe-model)")
|
||||
return "", fmt.Errorf("%s", i18n.T("transcription_model_required"))
|
||||
}
|
||||
if message, err = tr.TranscribeFile(context.Background(), flags.TranscribeFile, model, flags.SplitMediaFile); err != nil {
|
||||
return
|
||||
|
||||
@@ -10,6 +10,7 @@ import (
|
||||
"strconv"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
debuglog "github.com/danielmiessler/fabric/internal/log"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/anthropic"
|
||||
"github.com/danielmiessler/fabric/internal/plugins/ai/azure"
|
||||
@@ -131,7 +132,7 @@ func (o *PluginRegistry) ListVendors(out io.Writer) error {
|
||||
vendors := lo.Map(o.VendorsAll.Vendors, func(vendor ai.Vendor, _ int) string {
|
||||
return vendor.GetName()
|
||||
})
|
||||
fmt.Fprint(out, "Available Vendors:\n\n")
|
||||
fmt.Fprintf(out, "%s\n\n", i18n.T("available_vendors_header"))
|
||||
for _, vendor := range vendors {
|
||||
fmt.Fprintf(out, "%s\n", vendor)
|
||||
}
|
||||
|
||||
@@ -66,12 +66,12 @@ func Init(locale string) (*i18n.Localizer, error) {
|
||||
if _, err := os.Stat(path); os.IsNotExist(err) && !embedded {
|
||||
if err := downloadLocale(path, locale); err != nil {
|
||||
// if download fails, still continue with embedded translations
|
||||
fmt.Fprintln(os.Stderr, "i18n download failed:", err)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", fmt.Sprintf(getErrorMessage("i18n_download_failed", "Failed to download translation for language '%s': %v"), locale, err))
|
||||
}
|
||||
}
|
||||
if _, err := os.Stat(path); err == nil {
|
||||
if _, err := bundle.LoadMessageFile(path); err != nil {
|
||||
fmt.Fprintln(os.Stderr, "i18n load failed:", err)
|
||||
fmt.Fprintf(os.Stderr, "%s\n", fmt.Sprintf(getErrorMessage("i18n_load_failed", "Failed to load translation file: %v"), err))
|
||||
}
|
||||
}
|
||||
|
||||
@@ -119,3 +119,42 @@ func downloadLocale(path, locale string) error {
|
||||
_, err = io.Copy(f, resp.Body)
|
||||
return err
|
||||
}
|
||||
|
||||
// getErrorMessage tries to get a translated error message, falling back to system locale
|
||||
// and then to the provided fallback message. This is used during initialization when
|
||||
// the translator may not be fully ready.
|
||||
func getErrorMessage(messageID, fallback string) string {
|
||||
// Try to get system locale for error messages
|
||||
systemLocale := getPreferredLocale("")
|
||||
if systemLocale == "" {
|
||||
systemLocale = "en"
|
||||
}
|
||||
|
||||
// First try the system locale
|
||||
if msg := tryGetMessage(systemLocale, messageID); msg != "" {
|
||||
return msg
|
||||
}
|
||||
|
||||
// Fall back to English
|
||||
if systemLocale != "en" {
|
||||
if msg := tryGetMessage("en", messageID); msg != "" {
|
||||
return msg
|
||||
}
|
||||
}
|
||||
|
||||
// Final fallback to hardcoded message
|
||||
return fallback
|
||||
}
|
||||
|
||||
// tryGetMessage attempts to get a message from embedded locale files
|
||||
func tryGetMessage(locale, messageID string) string {
|
||||
if data, err := localeFS.ReadFile("locales/" + locale + ".json"); err == nil {
|
||||
var messages map[string]string
|
||||
if json.Unmarshal(data, &messages) == nil {
|
||||
if msg, exists := messages[messageID]; exists {
|
||||
return msg
|
||||
}
|
||||
}
|
||||
}
|
||||
return ""
|
||||
}
|
||||
|
||||
@@ -1,3 +1,136 @@
|
||||
{
|
||||
"html_readability_error": "use original input, because can't apply html readability"
|
||||
"html_readability_error": "use original input, because can't apply html readability",
|
||||
"vendor_not_configured": "vendor %s not configured",
|
||||
"vendor_no_transcription_support": "vendor %s does not support audio transcription",
|
||||
"transcription_model_required": "transcription model is required (use --transcribe-model)",
|
||||
"youtube_not_configured": "YouTube is not configured, please run the setup procedure",
|
||||
"error_fetching_playlist_videos": "error fetching playlist videos: %w",
|
||||
"scraping_not_configured": "scraping functionality is not configured. Please set up Jina to enable scraping",
|
||||
"could_not_determine_home_dir": "could not determine user home directory: %w",
|
||||
"could_not_stat_env_file": "could not stat .env file: %w",
|
||||
"could_not_create_config_dir": "could not create config directory: %w",
|
||||
"could_not_create_env_file": "could not create .env file: %w",
|
||||
"could_not_copy_to_clipboard": "could not copy to clipboard: %v",
|
||||
"file_already_exists_not_overwriting": "file %s already exists, not overwriting. Rename the existing file or choose a different name",
|
||||
"error_creating_file": "error creating file: %v",
|
||||
"error_writing_to_file": "error writing to file: %v",
|
||||
"error_creating_audio_file": "error creating audio file: %v",
|
||||
"error_writing_audio_data": "error writing audio data to file: %v",
|
||||
"tts_model_requires_audio_output": "TTS model '%s' requires audio output. Please specify an audio output file with -o flag (e.g., -o output.wav)",
|
||||
"audio_output_file_specified_but_not_tts_model": "audio output file '%s' specified but model '%s' is not a TTS model. Please use a TTS model like gemini-2.5-flash-preview-tts",
|
||||
"file_already_exists_choose_different": "file %s already exists. Please choose a different filename or remove the existing file",
|
||||
"no_notification_system_available": "no notification system available",
|
||||
"cannot_convert_string": "cannot convert string %q to %v",
|
||||
"unsupported_conversion": "unsupported conversion from %v to %v",
|
||||
"invalid_config_path": "invalid config path: %w",
|
||||
"config_file_not_found": "config file not found: %s",
|
||||
"error_reading_config_file": "error reading config file: %w",
|
||||
"error_parsing_config_file": "error parsing config file: %w",
|
||||
"error_reading_piped_message": "error reading piped message from stdin: %w",
|
||||
"image_file_already_exists": "image file already exists: %s",
|
||||
"invalid_image_file_extension": "invalid image file extension '%s'. Supported formats: .png, .jpeg, .jpg, .webp",
|
||||
"image_parameters_require_image_file": "image parameters (--image-size, --image-quality, --image-background, --image-compression) can only be used with --image-file",
|
||||
"invalid_image_size": "invalid image size '%s'. Supported sizes: 1024x1024, 1536x1024, 1024x1536, auto",
|
||||
"invalid_image_quality": "invalid image quality '%s'. Supported qualities: low, medium, high, auto",
|
||||
"invalid_image_background": "invalid image background '%s'. Supported backgrounds: opaque, transparent",
|
||||
"image_compression_jpeg_webp_only": "image compression can only be used with JPEG and WebP formats, not %s",
|
||||
"image_compression_range_error": "image compression must be between 0 and 100, got %d",
|
||||
"transparent_background_png_webp_only": "transparent background can only be used with PNG and WebP formats, not %s",
|
||||
"available_transcription_models": "Available transcription models:",
|
||||
"tts_audio_generated_successfully": "TTS audio generated successfully and saved to: %s\n",
|
||||
"fabric_command_complete": "Fabric Command Complete",
|
||||
"fabric_command_complete_with_pattern": "Fabric: %s Complete",
|
||||
"command_completed_successfully": "Command completed successfully",
|
||||
"output_truncated": "Output: %s...",
|
||||
"output_full": "Output: %s",
|
||||
"choose_pattern_from_available": "Choose a pattern from the available patterns",
|
||||
"pattern_variables_help": "Values for pattern variables, e.g. -v=#role:expert -v=#points:30",
|
||||
"choose_context_from_available": "Choose a context from the available contexts",
|
||||
"choose_session_from_available": "Choose a session from the available sessions",
|
||||
"attachment_path_or_url_help": "Attachment path or URL (e.g. for OpenAI image recognition messages)",
|
||||
"run_setup_for_reconfigurable_parts": "Run setup for all reconfigurable parts of fabric",
|
||||
"set_temperature": "Set temperature",
|
||||
"set_top_p": "Set top P",
|
||||
"stream_help": "Stream",
|
||||
"set_presence_penalty": "Set presence penalty",
|
||||
"use_model_defaults_raw_help": "Use the defaults of the model without sending chat options (like temperature etc.) and use the user role instead of the system role for patterns.",
|
||||
"set_frequency_penalty": "Set frequency penalty",
|
||||
"list_all_patterns": "List all patterns",
|
||||
"list_all_available_models": "List all available models",
|
||||
"list_all_contexts": "List all contexts",
|
||||
"list_all_sessions": "List all sessions",
|
||||
"update_patterns": "Update patterns",
|
||||
"messages_to_send_to_chat": "Messages to send to chat",
|
||||
"copy_to_clipboard": "Copy to clipboard",
|
||||
"choose_model": "Choose model",
|
||||
"specify_vendor_for_model": "Specify vendor for the selected model (e.g., -V \"LM Studio\" -m openai/gpt-oss-20b)",
|
||||
"model_context_length_ollama": "Model context length (only affects ollama)",
|
||||
"output_to_file": "Output to file",
|
||||
"output_entire_session": "Output the entire session (also a temporary one) to the output file",
|
||||
"number_of_latest_patterns": "Number of latest patterns to list",
|
||||
"change_default_model": "Change default model",
|
||||
"youtube_url_help": "YouTube video or play list \"URL\" to grab transcript, comments from it and send to chat or print it put to the console and store it in the output file",
|
||||
"prefer_playlist_over_video": "Prefer playlist over video if both ids are present in the URL",
|
||||
"grab_transcript_from_youtube": "Grab transcript from YouTube video and send to chat (it is used per default).",
|
||||
"grab_transcript_with_timestamps": "Grab transcript from YouTube video with timestamps and send to chat",
|
||||
"grab_comments_from_youtube": "Grab comments from YouTube video and send to chat",
|
||||
"output_video_metadata": "Output video metadata",
|
||||
"additional_yt_dlp_args": "Additional arguments to pass to yt-dlp (e.g. '--cookies-from-browser brave')",
|
||||
"specify_language_code": "Specify the Language Code for the chat, e.g. -g=en -g=zh",
|
||||
"scrape_website_url": "Scrape website URL to markdown using Jina AI",
|
||||
"search_question_jina": "Search question using Jina AI",
|
||||
"seed_for_lmm_generation": "Seed to be used for LMM generation",
|
||||
"wipe_context": "Wipe context",
|
||||
"wipe_session": "Wipe session",
|
||||
"print_context": "Print context",
|
||||
"print_session": "Print session",
|
||||
"convert_html_readability": "Convert HTML input into a clean, readable view",
|
||||
"apply_variables_to_input": "Apply variables to user input",
|
||||
"disable_pattern_variable_replacement": "Disable pattern variable replacement",
|
||||
"show_dry_run": "Show what would be sent to the model without actually sending it",
|
||||
"serve_fabric_rest_api": "Serve the Fabric Rest API",
|
||||
"serve_fabric_api_ollama_endpoints": "Serve the Fabric Rest API with ollama endpoints",
|
||||
"address_to_bind_rest_api": "The address to bind the REST API",
|
||||
"api_key_secure_server_routes": "API key used to secure server routes",
|
||||
"path_to_yaml_config": "Path to YAML config file",
|
||||
"print_current_version": "Print current version",
|
||||
"list_all_registered_extensions": "List all registered extensions",
|
||||
"register_new_extension": "Register a new extension from config file path",
|
||||
"remove_registered_extension": "Remove a registered extension by name",
|
||||
"choose_strategy_from_available": "Choose a strategy from the available strategies",
|
||||
"list_all_strategies": "List all strategies",
|
||||
"list_all_vendors": "List all vendors",
|
||||
"output_raw_list_shell_completion": "Output raw list without headers/formatting (for shell completion)",
|
||||
"enable_web_search_tool": "Enable web search tool for supported models (Anthropic, OpenAI, Gemini)",
|
||||
"set_location_web_search": "Set location for web search results (e.g., 'America/Los_Angeles')",
|
||||
"save_generated_image_to_file": "Save generated image to specified file path (e.g., 'output.png')",
|
||||
"image_dimensions_help": "Image dimensions: 1024x1024, 1536x1024, 1024x1536, auto (default: auto)",
|
||||
"image_quality_help": "Image quality: low, medium, high, auto (default: auto)",
|
||||
"compression_level_jpeg_webp": "Compression level 0-100 for JPEG/WebP formats (default: not set)",
|
||||
"background_type_help": "Background type: opaque, transparent (default: opaque, only for PNG/WebP)",
|
||||
"suppress_thinking_tags": "Suppress text enclosed in thinking tags",
|
||||
"start_tag_thinking_sections": "Start tag for thinking sections",
|
||||
"end_tag_thinking_sections": "End tag for thinking sections",
|
||||
"disable_openai_responses_api": "Disable OpenAI Responses API (default: false)",
|
||||
"audio_video_file_transcribe": "Audio or video file to transcribe",
|
||||
"model_for_transcription": "Model to use for transcription (separate from chat model)",
|
||||
"split_media_files_ffmpeg": "Split audio/video files larger than 25MB using ffmpeg",
|
||||
"tts_voice_name": "TTS voice name for supported models (e.g., Kore, Charon, Puck)",
|
||||
"list_gemini_tts_voices": "List all available Gemini TTS voices",
|
||||
"list_transcription_models": "List all available transcription models",
|
||||
"send_desktop_notification": "Send desktop notification when command completes",
|
||||
"custom_notification_command": "Custom command to run for notifications (overrides built-in notifications)",
|
||||
"set_reasoning_thinking_level": "Set reasoning/thinking level (e.g., off, low, medium, high, or numeric tokens for Anthropic or Google Gemini)",
|
||||
"set_debug_level": "Set debug level (0=off, 1=basic, 2=detailed, 3=trace)",
|
||||
"usage_header": "Usage:",
|
||||
"application_options_header": "Application Options:",
|
||||
"help_options_header": "Help Options:",
|
||||
"help_message": "Show this help message",
|
||||
"options_placeholder": "[OPTIONS]",
|
||||
"available_vendors_header": "Available Vendors:",
|
||||
"available_models_header": "Available models",
|
||||
"no_items_found": "No %s",
|
||||
"no_description_available": "No description available",
|
||||
"i18n_download_failed": "Failed to download translation for language '%s': %v",
|
||||
"i18n_load_failed": "Failed to load translation file: %v"
|
||||
}
|
||||
|
||||
@@ -1,3 +1,136 @@
|
||||
{
|
||||
"html_readability_error": "usa la entrada original, porque no se puede aplicar la legibilidad de html"
|
||||
"html_readability_error": "usa la entrada original, porque no se puede aplicar la legibilidad de html",
|
||||
"vendor_not_configured": "el proveedor %s no está configurado",
|
||||
"vendor_no_transcription_support": "el proveedor %s no admite transcripción de audio",
|
||||
"transcription_model_required": "se requiere un modelo de transcripción (usa --transcribe-model)",
|
||||
"youtube_not_configured": "YouTube no está configurado, por favor ejecuta el procedimiento de configuración",
|
||||
"error_fetching_playlist_videos": "error al obtener videos de la lista de reproducción: %w",
|
||||
"scraping_not_configured": "la funcionalidad de extracción no está configurada. Por favor configura Jina para habilitar la extracción",
|
||||
"could_not_determine_home_dir": "no se pudo determinar el directorio home del usuario: %w",
|
||||
"could_not_stat_env_file": "no se pudo verificar el archivo .env: %w",
|
||||
"could_not_create_config_dir": "no se pudo crear el directorio de configuración: %w",
|
||||
"could_not_create_env_file": "no se pudo crear el archivo .env: %w",
|
||||
"could_not_copy_to_clipboard": "no se pudo copiar al portapapeles: %v",
|
||||
"file_already_exists_not_overwriting": "el archivo %s ya existe, no se sobrescribirá. Renombra el archivo existente o elige un nombre diferente",
|
||||
"error_creating_file": "error al crear el archivo: %v",
|
||||
"error_writing_to_file": "error al escribir al archivo: %v",
|
||||
"error_creating_audio_file": "error al crear el archivo de audio: %v",
|
||||
"error_writing_audio_data": "error al escribir datos de audio al archivo: %v",
|
||||
"tts_model_requires_audio_output": "el modelo TTS '%s' requiere salida de audio. Por favor especifica un archivo de salida de audio con la bandera -o (ej., -o output.wav)",
|
||||
"audio_output_file_specified_but_not_tts_model": "se especificó el archivo de salida de audio '%s' pero el modelo '%s' no es un modelo TTS. Por favor usa un modelo TTS como gemini-2.5-flash-preview-tts",
|
||||
"file_already_exists_choose_different": "el archivo %s ya existe. Por favor elige un nombre diferente o elimina el archivo existente",
|
||||
"no_notification_system_available": "no hay sistema de notificaciones disponible",
|
||||
"cannot_convert_string": "no se puede convertir la cadena %q a %v",
|
||||
"unsupported_conversion": "conversión no soportada de %v a %v",
|
||||
"invalid_config_path": "ruta de configuración inválida: %w",
|
||||
"config_file_not_found": "archivo de configuración no encontrado: %s",
|
||||
"error_reading_config_file": "error al leer el archivo de configuración: %w",
|
||||
"error_parsing_config_file": "error al analizar el archivo de configuración: %w",
|
||||
"error_reading_piped_message": "error al leer mensaje desde stdin: %w",
|
||||
"image_file_already_exists": "el archivo de imagen ya existe: %s",
|
||||
"invalid_image_file_extension": "extensión de archivo de imagen inválida '%s'. Formatos soportados: .png, .jpeg, .jpg, .webp",
|
||||
"image_parameters_require_image_file": "los parámetros de imagen (--image-size, --image-quality, --image-background, --image-compression) solo pueden usarse con --image-file",
|
||||
"invalid_image_size": "tamaño de imagen inválido '%s'. Tamaños soportados: 1024x1024, 1536x1024, 1024x1536, auto",
|
||||
"invalid_image_quality": "calidad de imagen inválida '%s'. Calidades soportadas: low, medium, high, auto",
|
||||
"invalid_image_background": "fondo de imagen inválido '%s'. Fondos soportados: opaque, transparent",
|
||||
"image_compression_jpeg_webp_only": "la compresión de imagen solo puede usarse con formatos JPEG y WebP, no %s",
|
||||
"image_compression_range_error": "la compresión de imagen debe estar entre 0 y 100, se obtuvo %d",
|
||||
"transparent_background_png_webp_only": "el fondo transparente solo puede usarse con formatos PNG y WebP, no %s",
|
||||
"available_transcription_models": "Modelos de transcripción disponibles:",
|
||||
"tts_audio_generated_successfully": "Audio TTS generado exitosamente y guardado en: %s\n",
|
||||
"fabric_command_complete": "Comando Fabric Completado",
|
||||
"fabric_command_complete_with_pattern": "Fabric: %s Completado",
|
||||
"command_completed_successfully": "Comando completado exitosamente",
|
||||
"output_truncated": "Salida: %s...",
|
||||
"output_full": "Salida: %s",
|
||||
"choose_pattern_from_available": "Elige un patrón de los patrones disponibles",
|
||||
"pattern_variables_help": "Valores para variables de patrón, ej. -v=#role:expert -v=#points:30",
|
||||
"choose_context_from_available": "Elige un contexto de los contextos disponibles",
|
||||
"choose_session_from_available": "Elige una sesión de las sesiones disponibles",
|
||||
"attachment_path_or_url_help": "Ruta de adjunto o URL (ej. para mensajes de reconocimiento de imagen de OpenAI)",
|
||||
"run_setup_for_reconfigurable_parts": "Ejecutar configuración para todas las partes reconfigurables de fabric",
|
||||
"set_temperature": "Establecer temperatura",
|
||||
"set_top_p": "Establecer top P",
|
||||
"stream_help": "Transmitir",
|
||||
"set_presence_penalty": "Establecer penalización de presencia",
|
||||
"use_model_defaults_raw_help": "Usar los valores predeterminados del modelo sin enviar opciones de chat (como temperatura, etc.) y usar el rol de usuario en lugar del rol del sistema para patrones.",
|
||||
"set_frequency_penalty": "Establecer penalización de frecuencia",
|
||||
"list_all_patterns": "Listar todos los patrones",
|
||||
"list_all_available_models": "Listar todos los modelos disponibles",
|
||||
"list_all_contexts": "Listar todos los contextos",
|
||||
"list_all_sessions": "Listar todas las sesiones",
|
||||
"update_patterns": "Actualizar patrones",
|
||||
"messages_to_send_to_chat": "Mensajes para enviar al chat",
|
||||
"copy_to_clipboard": "Copiar al portapapeles",
|
||||
"choose_model": "Elegir modelo",
|
||||
"specify_vendor_for_model": "Especificar proveedor para el modelo seleccionado (ej., -V \"LM Studio\" -m openai/gpt-oss-20b)",
|
||||
"model_context_length_ollama": "Longitud de contexto del modelo (solo afecta a ollama)",
|
||||
"output_to_file": "Salida a archivo",
|
||||
"output_entire_session": "Salida de toda la sesión (también una temporal) al archivo de salida",
|
||||
"number_of_latest_patterns": "Número de patrones más recientes a listar",
|
||||
"change_default_model": "Cambiar modelo predeterminado",
|
||||
"youtube_url_help": "Video de YouTube o \"URL\" de lista de reproducción para obtener transcripción, comentarios y enviar al chat o imprimir en la consola y almacenar en el archivo de salida",
|
||||
"prefer_playlist_over_video": "Preferir lista de reproducción sobre video si ambos ids están presentes en la URL",
|
||||
"grab_transcript_from_youtube": "Obtener transcripción del video de YouTube y enviar al chat (se usa por defecto).",
|
||||
"grab_transcript_with_timestamps": "Obtener transcripción del video de YouTube con marcas de tiempo y enviar al chat",
|
||||
"grab_comments_from_youtube": "Obtener comentarios del video de YouTube y enviar al chat",
|
||||
"output_video_metadata": "Salida de metadatos del video",
|
||||
"additional_yt_dlp_args": "Argumentos adicionales para pasar a yt-dlp (ej. '--cookies-from-browser brave')",
|
||||
"specify_language_code": "Especificar el Código de Idioma para el chat, ej. -g=en -g=zh",
|
||||
"scrape_website_url": "Extraer URL del sitio web a markdown usando Jina AI",
|
||||
"search_question_jina": "Pregunta de búsqueda usando Jina AI",
|
||||
"seed_for_lmm_generation": "Semilla para ser usada en la generación LMM",
|
||||
"wipe_context": "Limpiar contexto",
|
||||
"wipe_session": "Limpiar sesión",
|
||||
"print_context": "Imprimir contexto",
|
||||
"print_session": "Imprimir sesión",
|
||||
"convert_html_readability": "Convertir entrada HTML en una vista limpia y legible",
|
||||
"apply_variables_to_input": "Aplicar variables a la entrada del usuario",
|
||||
"disable_pattern_variable_replacement": "Deshabilitar reemplazo de variables de patrón",
|
||||
"show_dry_run": "Mostrar lo que se enviaría al modelo sin enviarlo realmente",
|
||||
"serve_fabric_rest_api": "Servir la API REST de Fabric",
|
||||
"serve_fabric_api_ollama_endpoints": "Servir la API REST de Fabric con endpoints de ollama",
|
||||
"address_to_bind_rest_api": "La dirección para vincular la API REST",
|
||||
"api_key_secure_server_routes": "Clave API usada para asegurar rutas del servidor",
|
||||
"path_to_yaml_config": "Ruta al archivo de configuración YAML",
|
||||
"print_current_version": "Imprimir versión actual",
|
||||
"list_all_registered_extensions": "Listar todas las extensiones registradas",
|
||||
"register_new_extension": "Registrar una nueva extensión desde la ruta del archivo de configuración",
|
||||
"remove_registered_extension": "Eliminar una extensión registrada por nombre",
|
||||
"choose_strategy_from_available": "Elegir una estrategia de las estrategias disponibles",
|
||||
"list_all_strategies": "Listar todas las estrategias",
|
||||
"list_all_vendors": "Listar todos los proveedores",
|
||||
"output_raw_list_shell_completion": "Salida de lista sin procesar sin encabezados/formato (para completado de shell)",
|
||||
"enable_web_search_tool": "Habilitar herramienta de búsqueda web para modelos soportados (Anthropic, OpenAI, Gemini)",
|
||||
"set_location_web_search": "Establecer ubicación para resultados de búsqueda web (ej., 'America/Los_Angeles')",
|
||||
"save_generated_image_to_file": "Guardar imagen generada en la ruta de archivo especificada (ej., 'output.png')",
|
||||
"image_dimensions_help": "Dimensiones de imagen: 1024x1024, 1536x1024, 1024x1536, auto (predeterminado: auto)",
|
||||
"image_quality_help": "Calidad de imagen: low, medium, high, auto (predeterminado: auto)",
|
||||
"compression_level_jpeg_webp": "Nivel de compresión 0-100 para formatos JPEG/WebP (predeterminado: no establecido)",
|
||||
"background_type_help": "Tipo de fondo: opaque, transparent (predeterminado: opaque, solo para PNG/WebP)",
|
||||
"suppress_thinking_tags": "Suprimir texto encerrado en etiquetas de pensamiento",
|
||||
"start_tag_thinking_sections": "Etiqueta de inicio para secciones de pensamiento",
|
||||
"end_tag_thinking_sections": "Etiqueta de fin para secciones de pensamiento",
|
||||
"disable_openai_responses_api": "Deshabilitar API de Respuestas de OpenAI (predeterminado: false)",
|
||||
"audio_video_file_transcribe": "Archivo de audio o video para transcribir",
|
||||
"model_for_transcription": "Modelo para usar en transcripción (separado del modelo de chat)",
|
||||
"split_media_files_ffmpeg": "Dividir archivos de audio/video mayores a 25MB usando ffmpeg",
|
||||
"tts_voice_name": "Nombre de voz TTS para modelos soportados (ej., Kore, Charon, Puck)",
|
||||
"list_gemini_tts_voices": "Listar todas las voces TTS de Gemini disponibles",
|
||||
"list_transcription_models": "Listar todos los modelos de transcripción disponibles",
|
||||
"send_desktop_notification": "Enviar notificación de escritorio cuando se complete el comando",
|
||||
"custom_notification_command": "Comando personalizado para ejecutar notificaciones (anula las notificaciones integradas)",
|
||||
"set_reasoning_thinking_level": "Establecer nivel de razonamiento/pensamiento (ej., off, low, medium, high, o tokens numéricos para Anthropic o Google Gemini)",
|
||||
"set_debug_level": "Establecer nivel de depuración (0=apagado, 1=básico, 2=detallado, 3=rastreo)",
|
||||
"usage_header": "Uso:",
|
||||
"application_options_header": "Opciones de la Aplicación:",
|
||||
"help_options_header": "Opciones de Ayuda:",
|
||||
"help_message": "Mostrar este mensaje de ayuda",
|
||||
"options_placeholder": "[OPCIONES]",
|
||||
"available_vendors_header": "Proveedores Disponibles:",
|
||||
"available_models_header": "Modelos disponibles",
|
||||
"no_items_found": "No hay %s",
|
||||
"no_description_available": "No hay descripción disponible",
|
||||
"i18n_download_failed": "Error al descargar traducción para el idioma '%s': %v",
|
||||
"i18n_load_failed": "Error al cargar archivo de traducción: %v"
|
||||
}
|
||||
|
||||
@@ -5,11 +5,12 @@ import (
|
||||
"sort"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
)
|
||||
|
||||
func NewVendorsModels() *VendorsModels {
|
||||
return &VendorsModels{GroupsItemsSelectorString: util.NewGroupsItemsSelectorString("Available models")}
|
||||
return &VendorsModels{GroupsItemsSelectorString: util.NewGroupsItemsSelectorString(i18n.T("available_models_header"))}
|
||||
}
|
||||
|
||||
type VendorsModels struct {
|
||||
@@ -21,7 +22,7 @@ type VendorsModels struct {
|
||||
// Default vendor and model are highlighted with an asterisk.
|
||||
func (o *VendorsModels) PrintWithVendor(shellCompleteList bool, defaultVendor, defaultModel string) {
|
||||
if !shellCompleteList {
|
||||
fmt.Printf("\n%v:\n", o.SelectionLabel)
|
||||
fmt.Printf("%s:\n\n", o.SelectionLabel)
|
||||
}
|
||||
|
||||
var currentItemIndex int
|
||||
|
||||
@@ -7,6 +7,7 @@ import (
|
||||
"path/filepath"
|
||||
"strings"
|
||||
|
||||
"github.com/danielmiessler/fabric/internal/i18n"
|
||||
"github.com/danielmiessler/fabric/internal/util"
|
||||
)
|
||||
|
||||
@@ -108,7 +109,7 @@ func (o *StorageEntity) ListNames(shellCompleteList bool) (err error) {
|
||||
|
||||
if len(names) == 0 {
|
||||
if !shellCompleteList {
|
||||
fmt.Printf("\nNo %v\n", o.Label)
|
||||
fmt.Printf("%s\n", fmt.Sprintf(i18n.T("no_items_found"), o.Label))
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||