
Commit 13e673b

Merge branch 'main' of github.com:All-Hands-AI/OpenHands into enyst/gemini

2 parents 738f7d0 + 408ad1f

21 files changed: +216 −152 lines

.github/ISSUE_TEMPLATE/bug_template.yml

Lines changed: 3 additions & 2 deletions

@@ -5,11 +5,12 @@ labels: ['bug']
 body:
   - type: markdown
     attributes:
-      value: Thank you for taking the time to fill out this bug report. Please provide as much information as possible to help us understand and address the issue effectively.
+      value: Thank you for taking the time to fill out this bug report. Please provide as much information as possible
+        to help us understand and address the issue effectively.

   - type: checkboxes
     attributes:
-      label: Is there an existing issue for the same bug?
+      label: Is there an existing issue for the same bug? (If one exists, thumbs up or comment on the issue instead).
       description: Please check if an issue already exists for the bug you encountered.
       options:
         - label: I have checked the existing issues.
Lines changed: 3 additions & 7 deletions

@@ -1,6 +1,6 @@
 ---
-name: Feature Request
-about: Suggest an idea for OpenHands features
+name: Feature Request or Enhancement
+about: Suggest an idea for an OpenHands feature or enhancement
 title: ''
 labels: 'enhancement'
 assignees: ''
@@ -9,10 +9,6 @@ assignees: ''

 **What problem or use case are you trying to solve?**

-**Describe the UX of the solution you'd like**
-
-**Do you have thoughts on the technical implementation?**
-
-**Describe alternatives you've considered**
+**Describe the UX or technical implementation you have in mind**

 **Additional context**

.github/ISSUE_TEMPLATE/technical_proposal.md

Lines changed: 0 additions & 18 deletions
This file was deleted.

openhands/llm/async_llm.py

Lines changed: 7 additions & 7 deletions

@@ -1,6 +1,6 @@
 import asyncio
 from functools import partial
-from typing import Any
+from typing import Any, Callable

 from litellm import acompletion as litellm_acompletion

@@ -17,7 +17,7 @@
 class AsyncLLM(LLM):
     """Asynchronous LLM class."""

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)

         self._async_completion = partial(
@@ -46,7 +46,7 @@ def __init__(self, *args, **kwargs):
             retry_max_wait=self.config.retry_max_wait,
             retry_multiplier=self.config.retry_multiplier,
         )
-        async def async_completion_wrapper(*args, **kwargs):
+        async def async_completion_wrapper(*args: Any, **kwargs: Any) -> Any:
             """Wrapper for the litellm acompletion function that adds logging and cost tracking."""
             messages: list[dict[str, Any]] | dict[str, Any] = []

@@ -77,7 +77,7 @@ async def async_completion_wrapper(*args, **kwargs):

             self.log_prompt(messages)

-            async def check_stopped():
+            async def check_stopped() -> None:
                 while should_continue():
                     if (
                         hasattr(self.config, 'on_cancel_requested_fn')
@@ -117,14 +117,14 @@ async def check_stopped():
             except asyncio.CancelledError:
                 pass

-        self._async_completion = async_completion_wrapper  # type: ignore
+        self._async_completion = async_completion_wrapper

-    async def _call_acompletion(self, *args, **kwargs):
+    async def _call_acompletion(self, *args: Any, **kwargs: Any) -> Any:
         """Wrapper for the litellm acompletion function."""
         # Used in testing?
         return await litellm_acompletion(*args, **kwargs)

     @property
-    def async_completion(self):
+    def async_completion(self) -> Callable:
         """Decorator for the async litellm acompletion function."""
         return self._async_completion
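
A note on the pattern above, which recurs throughout this commit: once the wrapper is fully annotated with *args: Any, **kwargs: Any and a return type, reassigning it over the original attribute type-checks cleanly, which is why the "# type: ignore" comment could be dropped. A minimal sketch of the idea, with hypothetical names:

from typing import Any, Callable


class Client:
    def __init__(self) -> None:
        self._completion: Callable = self._default_completion

        # fully annotated, so the reassignment below needs no "# type: ignore"
        def wrapper(*args: Any, **kwargs: Any) -> Any:
            return self._default_completion(*args, **kwargs)

        self._completion = wrapper

    def _default_completion(self, *args: Any, **kwargs: Any) -> Any:
        return {'args': args, 'kwargs': kwargs}

    @property
    def completion(self) -> Callable:
        return self._completion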

openhands/llm/bedrock.py

Lines changed: 1 addition & 1 deletion

@@ -28,5 +28,5 @@ def list_foundation_models(
     return []


-def remove_error_modelId(model_list):
+def remove_error_modelId(model_list: list[str]) -> list[str]:
     return list(filter(lambda m: not m.startswith('bedrock'), model_list))

openhands/llm/debug_mixin.py

Lines changed: 7 additions & 7 deletions

@@ -7,7 +7,7 @@


 class DebugMixin:
-    def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]):
+    def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]) -> None:
         if not messages:
             logger.debug('No completion messages!')
             return
@@ -24,30 +24,30 @@ def log_prompt(self, messages: list[dict[str, Any]] | dict[str, Any]):
         else:
             logger.debug('No completion messages!')

-    def log_response(self, message_back: str):
+    def log_response(self, message_back: str) -> None:
         if message_back:
             llm_response_logger.debug(message_back)

-    def _format_message_content(self, message: dict[str, Any]):
+    def _format_message_content(self, message: dict[str, Any]) -> str:
         content = message['content']
         if isinstance(content, list):
             return '\n'.join(
                 self._format_content_element(element) for element in content
             )
         return str(content)

-    def _format_content_element(self, element: dict[str, Any]):
+    def _format_content_element(self, element: dict[str, Any] | Any) -> str:
         if isinstance(element, dict):
             if 'text' in element:
-                return element['text']
+                return str(element['text'])
             if (
                 self.vision_is_active()
                 and 'image_url' in element
                 and 'url' in element['image_url']
             ):
-                return element['image_url']['url']
+                return str(element['image_url']['url'])
         return str(element)

     # This method should be implemented in the class that uses DebugMixin
-    def vision_is_active(self):
+    def vision_is_active(self) -> bool:
         raise NotImplementedError
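
The str() wrappers are what let _format_content_element declare -> str: values read out of a dict[str, Any] are typed Any, so mypy cannot otherwise prove the return type. A tiny illustration of the same constraint, with an invented function:

from typing import Any


def format_element(element: dict[str, Any]) -> str:
    # element['text'] is typed Any; str() narrows it so -> str holds
    return str(element['text'])


print(format_element({'text': 42}))  # prints '42'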

openhands/llm/llm.py

Lines changed: 14 additions & 12 deletions

@@ -187,7 +187,7 @@ def __init__(
             retry_multiplier=self.config.retry_multiplier,
             retry_listener=self.retry_listener,
         )
-        def wrapper(*args, **kwargs):
+        def wrapper(*args: Any, **kwargs: Any) -> Any:
             """Wrapper for the litellm completion function. Logs the input and output of the completion function."""
             from openhands.io import json

@@ -356,14 +356,14 @@ def wrapper(*args, **kwargs):
         self._completion = wrapper

     @property
-    def completion(self):
+    def completion(self) -> Callable:
         """Decorator for the litellm completion function.

         Check the complete documentation at https://litellm.vercel.app/docs/completion
         """
         return self._completion

-    def init_model_info(self):
+    def init_model_info(self) -> None:
         if self._tried_model_info:
             return
         self._tried_model_info = True
@@ -623,10 +623,12 @@ def get_token_count(self, messages: list[dict] | list[Message]) -> int:
         # try to get the token count with the default litellm tokenizers
         # or the custom tokenizer if set for this LLM configuration
         try:
-            return litellm.token_counter(
-                model=self.config.model,
-                messages=messages,
-                custom_tokenizer=self.tokenizer,
+            return int(
+                litellm.token_counter(
+                    model=self.config.model,
+                    messages=messages,
+                    custom_tokenizer=self.tokenizer,
+                )
             )
         except Exception as e:
             # limit logspam in case token count is not supported
@@ -655,7 +657,7 @@ def _is_local(self) -> bool:
             return True
         return False

-    def _completion_cost(self, response) -> float:
+    def _completion_cost(self, response: Any) -> float:
         """Calculate completion cost and update metrics with running total.

         Calculate the cost of a completion response based on the model. Local models are treated as free.
@@ -708,21 +710,21 @@ def _completion_cost(self, response) -> float:
             logger.debug(
                 f'Using fallback model name {_model_name} to get cost: {cost}'
             )
-            self.metrics.add_cost(cost)
-            return cost
+            self.metrics.add_cost(float(cost))
+            return float(cost)
         except Exception:
             self.cost_metric_supported = False
             logger.debug('Cost calculation not supported for this model.')
             return 0.0

-    def __str__(self):
+    def __str__(self) -> str:
         if self.config.api_version:
             return f'LLM(model={self.config.model}, api_version={self.config.api_version}, base_url={self.config.base_url})'
         elif self.config.base_url:
             return f'LLM(model={self.config.model}, base_url={self.config.base_url})'
         return f'LLM(model={self.config.model})'

-    def __repr__(self):
+    def __repr__(self) -> str:
         return str(self)

     def reset(self) -> None:
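
The int() wrapper around litellm.token_counter follows the same motive as the float() calls below it: when a third-party function's return annotation is looser than the caller's declared type, an explicit conversion keeps the signature honest under strict type checking. A hedged sketch against a stand-in for the library call:

from typing import Any


def loosely_typed_token_counter(model: str, messages: Any) -> Any:
    # stand-in for a library call annotated more loosely than it behaves
    return sum(len(str(m)) for m in messages) // 4


def get_token_count(model: str, messages: list[dict]) -> int:
    # int(...) guarantees the declared return type regardless of the stub
    return int(loosely_typed_token_counter(model=model, messages=messages))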

openhands/llm/metrics.py

Lines changed: 3 additions & 3 deletions

@@ -177,7 +177,7 @@ def get(self) -> dict:
             'token_usages': [usage.model_dump() for usage in self._token_usages],
         }

-    def reset(self):
+    def reset(self) -> None:
         self._accumulated_cost = 0.0
         self._costs = []
         self._response_latencies = []
@@ -192,13 +192,13 @@ def reset(self):
             response_id='',
         )

-    def log(self):
+    def log(self) -> str:
         """Log the metrics."""
         metrics = self.get()
         logs = ''
         for key, value in metrics.items():
             logs += f'{key}: {value}\n'
         return logs

-    def __repr__(self):
+    def __repr__(self) -> str:
         return f'Metrics({self.get()}'

openhands/llm/retry_mixin.py

Lines changed: 6 additions & 4 deletions

@@ -1,5 +1,6 @@
 import json
 import re
+from typing import Any, Callable

 from tenacity import (
     retry,
@@ -90,7 +91,7 @@ def custom_wait_strategy(self, retry_state) -> float:

         return default_wait

-    def retry_decorator(self, **kwargs):
+    def retry_decorator(self, **kwargs: Any) -> Callable:
         """
         Create a LLM retry decorator with customizable parameters. This is used for 429 errors, and a few other exceptions in LLM classes.

@@ -109,7 +110,7 @@ def retry_decorator(self, **kwargs):
         self.retry_multiplier = kwargs.get('retry_multiplier')
         retry_listener = kwargs.get('retry_listener')

-        def before_sleep(retry_state):
+        def before_sleep(retry_state: Any) -> None:
             self.log_retry_attempt(retry_state)
             if retry_listener:
                 retry_listener(retry_state.attempt_number, self.num_retries)
@@ -130,15 +131,16 @@ def before_sleep(retry_state):
                 f'LLMNoResponseError detected with temperature={current_temp}, keeping original temperature'
             )

-        return retry(
+        retry_decorator: Callable = retry(
             before_sleep=before_sleep,
             stop=stop_after_attempt(self.num_retries) | stop_if_should_exit(),
             reraise=True,
             retry=retry_if_exception_type(self.retry_exceptions),
             wait=self.custom_wait_strategy,
         )
+        return retry_decorator

-    def log_retry_attempt(self, retry_state):
+    def log_retry_attempt(self, retry_state: Any) -> None:
         """Log retry attempts."""
         exception = retry_state.outcome.exception()
         logger.error(
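
For context, retry(...) in tenacity returns a decorator, which is why the mixin can bind it to a local variable annotated Callable and return it. Roughly the same shape in isolation, with illustrative names and limits rather than OpenHands configuration:

from typing import Any, Callable

from tenacity import (
    retry,
    retry_if_exception_type,
    stop_after_attempt,
    wait_exponential,
)


def log_attempt(retry_state: Any) -> None:
    # runs between attempts; retry_state carries the outcome and attempt number
    print(f'retrying, attempt {retry_state.attempt_number}')


def make_retry_decorator(num_retries: int) -> Callable:
    retry_decorator: Callable = retry(
        before_sleep=log_attempt,
        stop=stop_after_attempt(num_retries),
        reraise=True,
        retry=retry_if_exception_type(ConnectionError),
        wait=wait_exponential(multiplier=1, min=1, max=10),
    )
    return retry_decorator


@make_retry_decorator(num_retries=3)
def flaky_call() -> str:
    return 'ok'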

openhands/llm/streaming_llm.py

Lines changed: 4 additions & 4 deletions

@@ -1,6 +1,6 @@
 import asyncio
 from functools import partial
-from typing import Any
+from typing import Any, Callable

 from openhands.core.exceptions import UserCancelledError
 from openhands.core.logger import openhands_logger as logger
@@ -11,7 +11,7 @@
 class StreamingLLM(AsyncLLM):
     """Streaming LLM class."""

-    def __init__(self, *args, **kwargs):
+    def __init__(self, *args: Any, **kwargs: Any) -> None:
         super().__init__(*args, **kwargs)

         self._async_streaming_completion = partial(
@@ -40,7 +40,7 @@ def __init__(self, *args, **kwargs):
             retry_max_wait=self.config.retry_max_wait,
             retry_multiplier=self.config.retry_multiplier,
         )
-        async def async_streaming_completion_wrapper(*args, **kwargs):
+        async def async_streaming_completion_wrapper(*args: Any, **kwargs: Any) -> Any:
             messages: list[dict[str, Any]] | dict[str, Any] = []

             # some callers might send the model and messages directly
@@ -108,6 +108,6 @@ async def async_streaming_completion_wrapper(*args, **kwargs):
         self._async_streaming_completion = async_streaming_completion_wrapper

     @property
-    def async_streaming_completion(self):
+    def async_streaming_completion(self) -> Callable:
         """Decorator for the async litellm acompletion function with streaming."""
         return self._async_streaming_completion

openhands/runtime/action_execution_server.py

Lines changed: 4 additions & 1 deletion

@@ -585,7 +585,10 @@ async def validation_exception_handler(
     logger.error(f'Validation error occurred: {exc}')
     return JSONResponse(
         status_code=422,
-        content={'detail': 'Invalid request parameters', 'errors': exc.errors()},
+        content={
+            'detail': 'Invalid request parameters',
+            'errors': str(exc.errors()),
+        },
     )


 @app.middleware('http')
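
The str() around exc.errors() is presumably a serialization fix: under pydantic v2, the validation error list can carry values that are not JSON-serializable (for example exception objects under a ctx key), which would make JSONResponse fail. A minimal sketch of the failure mode, with made-up error data:

import json

# a validation-error payload may hold non-serializable values under 'ctx'
errors = [{'loc': ('body', 'n'), 'msg': 'invalid', 'ctx': {'error': ValueError('bad')}}]

try:
    json.dumps(errors)
except TypeError as e:
    print(f'direct serialization fails: {e}')

# stringifying the whole list always serializes, at the cost of structure
print(json.dumps({'detail': 'Invalid request parameters', 'errors': str(errors)}))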

openhands/runtime/impl/action_execution/action_execution_client.py

Lines changed: 6 additions & 6 deletions

@@ -45,7 +45,7 @@
 from openhands.utils.tenacity_stop import stop_if_should_exit


-def _is_retryable_check_alive_error(exception):
+def _is_retryable_error(exception):
     return isinstance(
         exception, (httpx.RemoteProtocolError, httpcore.RemoteProtocolError)
     )
@@ -93,6 +93,11 @@ def __init__(
     def _get_action_execution_server_host(self) -> str:
         pass

+    @retry(
+        retry=retry_if_exception(_is_retryable_error),
+        stop=stop_after_attempt(5) | stop_if_should_exit(),
+        wait=wait_exponential(multiplier=1, min=4, max=15),
+    )
     def _send_action_server_request(
         self,
         method: str,
@@ -114,11 +119,6 @@ def _send_action_server_request(
         """
         return send_request(self.session, method, url, **kwargs)

-    @retry(
-        retry=retry_if_exception(_is_retryable_check_alive_error),
-        stop=stop_after_attempt(5) | stop_if_should_exit(),
-        wait=wait_exponential(multiplier=1, min=4, max=15),
-    )
     def check_if_alive(self) -> None:
         response = self._send_action_server_request(
             'GET',
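
Moving the @retry decorator from check_if_alive onto _send_action_server_request widens its scope: every request routed through the shared helper is now retried on transient protocol errors, not just the liveness check, and the rename to _is_retryable_error reflects that. A sketch of the resulting shape, with stand-in names and a generic exception:

from tenacity import retry, retry_if_exception, stop_after_attempt, wait_exponential


def _is_retryable_error(exception: BaseException) -> bool:
    # the real predicate checks httpx/httpcore RemoteProtocolError
    return isinstance(exception, ConnectionError)


@retry(
    retry=retry_if_exception(_is_retryable_error),
    stop=stop_after_attempt(5),
    wait=wait_exponential(multiplier=1, min=4, max=15),
)
def send_action_server_request(method: str, url: str) -> str:
    # decorating the shared helper retries every caller,
    # not only a single endpoint such as check_if_alive
    return f'{method} {url} -> 200'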
