mirror of https://github.com/All-Hands-AI/OpenHands.git (synced 2026-01-09 14:57:59 -05:00)
fix(llm): ensure base_url has protocol prefix for model info fetch when using LiteLLM (#7782)
Co-authored-by: Engel Nyst <enyst@users.noreply.github.com>
Co-authored-by: sp.wack <83104063+amanape@users.noreply.github.com>
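For context, a minimal standalone sketch of the normalization this commit applies to base_url in the LLM class hunk further down; the helper name normalize_base_url and the sample hostnames are illustrative and not part of the diff:

def normalize_base_url(base_url: str | None) -> str:
    # Trim whitespace and default to http:// when no scheme is given,
    # mirroring what the new code does before requesting {base_url}/v1/model/info.
    base_url = base_url.strip() if base_url else ''
    if not base_url.startswith(('http://', 'https://')):
        base_url = 'http://' + base_url
    return base_url

# Illustrative behavior (hostnames are made up):
assert normalize_base_url(' api.example.com ') == 'http://api.example.com'
assert normalize_base_url('https://proxy.example.com') == 'https://proxy.example.com'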
@@ -1,4 +1,4 @@
cd frontend
npm run check-unlocalized-strings
npx lint-staged
npm test
npm test
@@ -37,4 +37,4 @@ describe("CopyToClipboardButton", () => {
    const button = screen.getByTestId("copy-to-clipboard");
    expect(button).toHaveAttribute("aria-label", "BUTTON$COPIED");
  });
});
});
@@ -76,11 +76,11 @@ describe("ConversationCard", () => {
    const card = screen.getByTestId("conversation-card");

    within(card).getByText("Conversation 1");

    // Just check that the card contains the expected text content
    expect(card).toHaveTextContent("Created");
    expect(card).toHaveTextContent("ago");

    // Use a regex to match the time part since it might have whitespace
    const timeRegex = new RegExp(formatTimeDelta(new Date("2021-10-01T12:00:00Z")));
    expect(card).toHaveTextContent(timeRegex);
@@ -124,7 +124,7 @@ function AccountSettings() {
       formData.get("enable-memory-condenser-switch")?.toString() === "on";
     const enableSoundNotifications =
       formData.get("enable-sound-notifications-switch")?.toString() === "on";
-    const llmBaseUrl = formData.get("base-url-input")?.toString() || "";
+    const llmBaseUrl = formData.get("base-url-input")?.toString().trim() || "";
     const inputApiKey = formData.get("llm-api-key-input")?.toString() || "";
     const llmApiKey =
       inputApiKey === "" && isLLMKeySet
@@ -375,12 +375,17 @@ class LLM(RetryMixin, DebugMixin):
         if self.config.model.startswith('litellm_proxy/'):
             # IF we are using LiteLLM proxy, get model info from LiteLLM proxy
             # GET {base_url}/v1/model/info with litellm_model_id as path param
+            base_url = self.config.base_url.strip() if self.config.base_url else ''
+            if not base_url.startswith(('http://', 'https://')):
+                base_url = 'http://' + base_url
+
             response = httpx.get(
-                f'{self.config.base_url}/v1/model/info',
+                f'{base_url}/v1/model/info',
                 headers={
                     'Authorization': f'Bearer {self.config.api_key.get_secret_value() if self.config.api_key else None}'
                 },
             )

             resp_json = response.json()
             if 'data' not in resp_json:
                 logger.error(
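The scheme check above exists because httpx refuses to send a request to a URL that lacks an http:// or https:// prefix. A rough illustration, assuming httpx's UnsupportedProtocol behavior and a made-up hostname:

import httpx

try:
    # A base_url such as 'api.example.com' would previously be used as-is.
    httpx.get('api.example.com/v1/model/info')
except httpx.UnsupportedProtocol as exc:
    print(f'rejected before any request was sent: {exc}')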
@@ -896,3 +896,22 @@ def test_completion_with_log_completions(mock_litellm_completion, default_config
     files = list(Path(temp_dir).iterdir())
     # Expect a log to be generated
     assert len(files) == 1
+
+
+@patch('httpx.get')
+def test_llm_base_url_auto_protocol_patch(mock_get):
+    """Test that LLM base_url without protocol is automatically fixed with 'http://'."""
+    config = LLMConfig(
+        model='litellm_proxy/test-model',
+        api_key='fake-key',
+        base_url=' api.example.com ',
+    )
+
+    mock_get.return_value.status_code = 200
+    mock_get.return_value.json.return_value = {'model': 'fake'}
+
+    llm = LLM(config=config)
+    llm.init_model_info()
+
+    called_url = mock_get.call_args[0][0]
+    assert called_url.startswith('http://') or called_url.startswith('https://')