[vector sets] Add --ollama-url option to configure embedding server (#14317)

This PR adds a `--ollama-url` option to `cli.py`, the lightweight
redis-cli-like tool that expands !"text" arguments into embeddings via
Ollama.

Previously, the embedding call was hardcoded to
http://localhost:11434/api/embeddings. With this change, users can
specify a custom Ollama server URL when starting the tool.

If no URL is provided, the tool falls back to the previous default,
http://localhost:11434/api/embeddings, so existing setups keep working
unchanged.
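For example, to point the tool at a non-default server (the hostname below
is hypothetical, not part of the PR):

    python3 cli.py --ollama-url http://embeddings.example.internal:11434/api/embeddings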
Author: Rushabh Mehta
Date: 2025-09-15 07:03:18 +05:30
Committed by: GitHub
Parent: 9b63e99d05
Commit: e8726d18e5


@@ -8,6 +8,7 @@
 #
 #!/usr/bin/env python3
+import argparse
 import redis
 import requests
 import re
@@ -15,9 +16,12 @@ import shlex
 from prompt_toolkit import PromptSession
 from prompt_toolkit.history import InMemoryHistory
 
+# Default Ollama embeddings URL (can be overridden with --ollama-url)
+OLLAMA_URL = "http://localhost:11434/api/embeddings"
+
 def get_embedding(text):
     """Get embedding from local Ollama API"""
-    url = "http://localhost:11434/api/embeddings"
+    url = OLLAMA_URL
     payload = {
         "model": "mxbai-embed-large",
         "prompt": text
@@ -73,6 +77,15 @@ def format_response(response):
     return str(response)
 
 def main():
+    global OLLAMA_URL
+
+    parser = argparse.ArgumentParser(prog="cli.py", add_help=False)
+    parser.add_argument("--ollama-url", dest="ollama_url",
+                        help=f"Ollama embeddings API URL (default: {OLLAMA_URL})",
+                        default=OLLAMA_URL)
+    args, _ = parser.parse_known_args()
+    OLLAMA_URL = args.ollama_url
+
     # Default connection to localhost:6379
     r = redis.Redis(host='localhost', port=6379, decode_responses=True)
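For reference, a minimal sketch of how the embedding call behaves after this
change. The model name, payload fields, and default URL come from the diff
above; the response shape ("embedding" list), error handling, and timeout are
assumptions and not part of cli.py:

    import argparse
    import requests

    # Default matches the previously hardcoded endpoint; overridden by --ollama-url.
    OLLAMA_URL = "http://localhost:11434/api/embeddings"

    def get_embedding(text):
        """Fetch an embedding for `text` from the configured Ollama endpoint.

        Sketch only: the payload mirrors cli.py; the "embedding" response key
        and error handling are assumptions.
        """
        payload = {"model": "mxbai-embed-large", "prompt": text}
        resp = requests.post(OLLAMA_URL, json=payload, timeout=30)
        resp.raise_for_status()
        return resp.json()["embedding"]

    if __name__ == "__main__":
        # Mirrors the option parsing added to main(): parse_known_args() pulls
        # out --ollama-url and returns everything else untouched.
        parser = argparse.ArgumentParser(prog="cli.py", add_help=False)
        parser.add_argument("--ollama-url", dest="ollama_url", default=OLLAMA_URL)
        args, _ = parser.parse_known_args()
        OLLAMA_URL = args.ollama_url

Using parse_known_args() rather than parse_args() means unrecognized arguments
are returned in the (ignored) second element instead of triggering an error,
so the new option can be parsed without interfering with anything else passed
to the tool at startup.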