Skip to content

fix(llm): ensure base_url has protocol prefix for model info fetch when using LiteLLM #7782

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 14 commits into from
Apr 10, 2025
Merged
Show file tree
Hide file tree
Changes from 11 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion frontend/.husky/pre-commit
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
cd frontend
npm run check-unlocalized-strings
npx lint-staged
npm test
npm test
Original file line number Diff line number Diff line change
Expand Up @@ -37,4 +37,4 @@ describe("CopyToClipboardButton", () => {
const button = screen.getByTestId("copy-to-clipboard");
expect(button).toHaveAttribute("aria-label", "BUTTON$COPIED");
});
});
});
Original file line number Diff line number Diff line change
Expand Up @@ -76,11 +76,11 @@ describe("ConversationCard", () => {
const card = screen.getByTestId("conversation-card");

within(card).getByText("Conversation 1");

// Just check that the card contains the expected text content
expect(card).toHaveTextContent("Created");
expect(card).toHaveTextContent("ago");

// Use a regex to match the time part since it might have whitespace
const timeRegex = new RegExp(formatTimeDelta(new Date("2021-10-01T12:00:00Z")));
expect(card).toHaveTextContent(timeRegex);
Expand Down
2 changes: 1 addition & 1 deletion frontend/src/i18n/translation.json
Original file line number Diff line number Diff line change
Expand Up @@ -6089,4 +6089,4 @@
"tr": "belgelendirme",
"de": "Dokumentation"
}
}
}
7 changes: 6 additions & 1 deletion openhands/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -375,12 +375,17 @@ def init_model_info(self) -> None:
if self.config.model.startswith('litellm_proxy/'):
# IF we are using LiteLLM proxy, get model info from LiteLLM proxy
# GET {base_url}/v1/model/info with litellm_model_id as path param
base_url = (self.config.base_url or '').strip()
if not base_url.startswith(('http://', 'https://')):
base_url = 'http://' + base_url

response = httpx.get(
f'{self.config.base_url}/v1/model/info',
f'{base_url}/v1/model/info',
headers={
'Authorization': f'Bearer {self.config.api_key.get_secret_value() if self.config.api_key else None}'
},
)

resp_json = response.json()
if 'data' not in resp_json:
logger.error(
Expand Down
19 changes: 19 additions & 0 deletions tests/unit/test_llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -896,3 +896,22 @@ def test_completion_with_log_completions(mock_litellm_completion, default_config
files = list(Path(temp_dir).iterdir())
# Expect a log to be generated
assert len(files) == 1


@patch('httpx.get')
def test_llm_base_url_auto_protocol_patch(mock_get):
    """Test that a LiteLLM-proxy base_url is normalized before the model-info fetch.

    The configured base_url has surrounding whitespace and no scheme;
    init_model_info must strip the whitespace and prepend 'http://' before
    issuing the GET to {base_url}/v1/model/info.
    """
    config = LLMConfig(
        model='litellm_proxy/test-model',
        api_key='fake-key',
        base_url=' api.example.com ',
    )

    # Stub the proxy response; the payload intentionally lacks 'data' so
    # init_model_info logs and bails out after the single httpx.get call.
    mock_get.return_value.status_code = 200
    mock_get.return_value.json.return_value = {'model': 'fake'}

    llm = LLM(config=config)
    llm.init_model_info()

    # Exactly one model-info request, with the URL as the first positional arg.
    mock_get.assert_called_once()
    called_url = mock_get.call_args[0][0]

    # Pin the exact normalized URL: whitespace stripped, scheme prepended,
    # endpoint path appended. (A bare startswith('http') check would pass
    # even if the stripping or the /v1/model/info path were broken.)
    assert called_url == 'http://api.example.com/v1/model/info'
Loading