Skip to content

fix(llm): ensure base_url has protocol prefix for model info fetch when using LiteLLM #7782

New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Merged
merged 14 commits into from
Apr 10, 2025
Merged
Show file tree
Hide file tree
Changes from 7 commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 1 addition & 1 deletion frontend/.husky/pre-commit
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
cd frontend
npm run check-unlocalized-strings
npx lint-staged
npm test
npm test
Original file line number Diff line number Diff line change
Expand Up @@ -37,4 +37,4 @@ describe("CopyToClipboardButton", () => {
const button = screen.getByTestId("copy-to-clipboard");
expect(button).toHaveAttribute("aria-label", "BUTTON$COPIED");
});
});
});
Original file line number Diff line number Diff line change
Expand Up @@ -76,11 +76,11 @@ describe("ConversationCard", () => {
const card = screen.getByTestId("conversation-card");

within(card).getByText("Conversation 1");

// Just check that the card contains the expected text content
expect(card).toHaveTextContent("Created");
expect(card).toHaveTextContent("ago");

// Use a regex to match the time part since it might have whitespace
const timeRegex = new RegExp(formatTimeDelta(new Date("2021-10-01T12:00:00Z")));
expect(card).toHaveTextContent(timeRegex);
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -30,7 +30,7 @@ export function SetupPaymentModal() {
</h1>
<p>
<Trans
i18nKey="BILLING$CLAIM_YOUR_50"
i18nKey={t(I18nKey.BILLING$CLAIM_YOUR_50)}
components={{ b: <strong /> }}
/>
</p>
Expand Down
2 changes: 1 addition & 1 deletion frontend/src/i18n/translation.json
Original file line number Diff line number Diff line change
Expand Up @@ -6089,4 +6089,4 @@
"tr": "belgelendirme",
"de": "Dokumentation"
}
}
}
7 changes: 6 additions & 1 deletion openhands/llm/llm.py
Original file line number Diff line number Diff line change
Expand Up @@ -375,12 +375,17 @@ def init_model_info(self) -> None:
if self.config.model.startswith('litellm_proxy/'):
# IF we are using LiteLLM proxy, get model info from LiteLLM proxy
# GET {base_url}/v1/model/info with litellm_model_id as path param
base_url = (self.config.base_url or '').strip()
if not base_url.startswith(('http://', 'https://')):
base_url = 'http://' + base_url

response = httpx.get(
f'{self.config.base_url}/v1/model/info',
f'{base_url}/v1/model/info',
headers={
'Authorization': f'Bearer {self.config.api_key.get_secret_value() if self.config.api_key else None}'
},
)

resp_json = response.json()
if 'data' not in resp_json:
logger.error(
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -420,22 +420,24 @@ async def _update_conversation_for_event(
conversation_store = await self._get_conversation_store(user_id, github_user_id)
conversation = await conversation_store.get_metadata(conversation_id)
conversation.last_updated_at = datetime.now(timezone.utc)

# Update cost/token metrics if event has llm_metrics
if event and hasattr(event, 'llm_metrics') and event.llm_metrics:
metrics = event.llm_metrics

# Update accumulated cost
if hasattr(metrics, 'accumulated_cost'):
conversation.accumulated_cost = metrics.accumulated_cost

# Update token usage
if hasattr(metrics, 'accumulated_token_usage'):
token_usage = metrics.accumulated_token_usage
conversation.prompt_tokens = token_usage.prompt_tokens
conversation.completion_tokens = token_usage.completion_tokens
conversation.total_tokens = token_usage.prompt_tokens + token_usage.completion_tokens

conversation.total_tokens = (
token_usage.prompt_tokens + token_usage.completion_tokens
)

await conversation_store.save_metadata(conversation)


Expand Down
8 changes: 5 additions & 3 deletions openhands/server/routes/git.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from fastapi import APIRouter, Depends, status
from fastapi.responses import JSONResponse
from pydantic import SecretStr
from openhands.server.shared import server_config

from openhands.integrations.github.github_service import GithubServiceImpl
from openhands.integrations.provider import (
PROVIDER_TOKEN_TYPE,
Expand All @@ -16,7 +16,7 @@
User,
)
from openhands.server.auth import get_access_token, get_provider_tokens
from openhands.server.types import AppMode
from openhands.server.shared import server_config

app = APIRouter(prefix='/api/user')

Expand All @@ -33,7 +33,9 @@ async def get_user_repositories(
)

try:
repos: list[Repository] = await client.get_repositories(sort, server_config.app_mode)
repos: list[Repository] = await client.get_repositories(
sort, server_config.app_mode
)
return repos

except AuthenticationError as e:
Expand Down
4 changes: 3 additions & 1 deletion openhands/server/routes/manage_conversations.py
Original file line number Diff line number Diff line change
Expand Up @@ -112,7 +112,9 @@ async def _create_new_conversation(
title=conversation_title,
user_id=user_id,
github_user_id=None,
selected_repository=selected_repository.full_name if selected_repository else selected_repository,
selected_repository=selected_repository.full_name
if selected_repository
else selected_repository,
selected_branch=selected_branch,
)
)
Expand Down
4 changes: 3 additions & 1 deletion openhands/storage/conversation/conversation_validator.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,7 +6,9 @@
class ConversationValidator:
"""Storage for conversation metadata. May or may not support multiple users depending on the environment."""

async def validate(self, conversation_id: str, cookies_str: str) -> tuple[None, None]:
async def validate(
self, conversation_id: str, cookies_str: str
) -> tuple[None, None]:
return None, None


Expand Down
42 changes: 42 additions & 0 deletions tests/unit/test_no_protocol_url_test.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,42 @@
from unittest.mock import patch

from openhands.core.config import LLMConfig
from openhands.llm.llm import LLM


def test_base_url_protocol_is_fixed_before_request():
    """
    Test that LLM automatically prepends a protocol to the base_url
    if it is missing, to prevent httpx.UnsupportedProtocol error.

    This avoids runtime crashes when users forget to include 'http://' or 'https://'
    in the LLMConfig.base_url.

    Steps:
    1. Create an LLMConfig with a base_url missing the protocol.
    2. Patch httpx.get to intercept the actual URL used in the request.
    3. Initialize an LLM instance using this config.
    4. Call init_model_info() to trigger the request.
    5. Assert that httpx.get was called exactly once with the normalized URL.
    """

    # Create config with base_url deliberately missing the protocol prefix.
    config = LLMConfig(
        model='litellm_proxy/test-model', api_key='fake-key', base_url='api.example.com'
    )

    # Patch httpx.get to intercept the actual request; the fake JSON body has
    # no 'data' key, which init_model_info tolerates (it only logs an error).
    with patch('httpx.get') as mock_get:
        mock_get.return_value.status_code = 200
        mock_get.return_value.json.return_value = {'model': 'mock'}

        llm = LLM(config=config)

        # Trigger model info fetch
        llm.init_model_info()

        # The fetch must have happened exactly once before we inspect call_args.
        mock_get.assert_called_once()
        called_url = mock_get.call_args[0][0]

        # Pin the exact URL: a bare host gets 'http://' prepended and the
        # '/v1/model/info' path appended — stronger than only checking that
        # *some* protocol prefix is present.
        assert called_url == 'http://api.example.com/v1/model/info'
        assert called_url.startswith(('http://', 'https://'))
Loading