Skip to content

Commit a16a1c4

Browse files
fix(http_handler.py): allow setting ca bundle path
1 parent f1ce7bb commit a16a1c4

File tree

3 files changed: +12 −3 lines changed

litellm/__init__.py

+1
Original file line numberDiff line numberDiff line change
@@ -102,6 +102,7 @@
102102
}
103103
use_client: bool = False
104104
ssl_verify: bool = True
105+
ssl_certificate: Optional[str] = None
105106
disable_streaming_logging: bool = False
106107
in_memory_llm_clients_cache: dict = {}
107108
### GUARDRAILS ###

litellm/llms/custom_httpx/http_handler.py

+8-2
Original file line numberDiff line numberDiff line change
@@ -12,15 +12,16 @@ def __init__(
1212
timeout: Optional[Union[float, httpx.Timeout]] = None,
1313
concurrent_limit=1000,
1414
):
15-
sync_proxy_mounts = None
1615
async_proxy_mounts = None
1716
# Check if the HTTP_PROXY and HTTPS_PROXY environment variables are set and use them accordingly.
1817
http_proxy = os.getenv("HTTP_PROXY", None)
1918
https_proxy = os.getenv("HTTPS_PROXY", None)
2019
no_proxy = os.getenv("NO_PROXY", None)
2120
ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
21+
cert = os.getenv(
22+
"SSL_CERTIFICATE", litellm.ssl_certificate
23+
) # /path/to/client.pem
2224

23-
sync_proxy_mounts = None
2425
if http_proxy is not None and https_proxy is not None:
2526
async_proxy_mounts = {
2627
"http://": httpx.AsyncHTTPTransport(proxy=httpx.Proxy(url=http_proxy)),
@@ -46,6 +47,7 @@ def __init__(
4647
),
4748
verify=ssl_verify,
4849
mounts=async_proxy_mounts,
50+
cert=cert,
4951
)
5052

5153
async def close(self):
@@ -108,6 +110,9 @@ def __init__(
108110
https_proxy = os.getenv("HTTPS_PROXY", None)
109111
no_proxy = os.getenv("NO_PROXY", None)
110112
ssl_verify = bool(os.getenv("SSL_VERIFY", litellm.ssl_verify))
113+
cert = os.getenv(
114+
"SSL_CERTIFICATE", litellm.ssl_certificate
115+
) # /path/to/client.pem
111116

112117
sync_proxy_mounts = None
113118
if http_proxy is not None and https_proxy is not None:
@@ -132,6 +137,7 @@ def __init__(
132137
),
133138
verify=ssl_verify,
134139
mounts=sync_proxy_mounts,
140+
cert=cert,
135141
)
136142
else:
137143
self.client = client

litellm/main.py

+3-1
Original file line numberDiff line numberDiff line change
@@ -223,7 +223,7 @@ async def acompletion(
223223
extra_headers: Optional[dict] = None,
224224
# Optional liteLLM function params
225225
**kwargs,
226-
):
226+
) -> Union[ModelResponse, CustomStreamWrapper]:
227227
"""
228228
Asynchronously executes a litellm.completion() call for any of litellm supported llms (example gpt-4, gpt-3.5-turbo, claude-2, command-nightly)
229229
@@ -339,6 +339,8 @@ async def acompletion(
339339
if isinstance(init_response, dict) or isinstance(
340340
init_response, ModelResponse
341341
): ## CACHING SCENARIO
342+
if isinstance(init_response, dict):
343+
response = ModelResponse(**init_response)
342344
response = init_response
343345
elif asyncio.iscoroutine(init_response):
344346
response = await init_response

0 commit comments

Comments (0)