File tree: 1 file changed (+8, −9 lines changed)

vertexai/generative_models — 1 file changed, +8 −9

@@ -1022,17 +1022,17 @@ def _send_message_streaming(
         full_response = None
         for chunk in stream:
             chunks.append(chunk)
-            if full_response:
-                _append_response(full_response, chunk)
-            else:
-                full_response = chunk
             # By default we're not adding incomplete interactions to history.
             if self._response_validator is not None:
                 self._response_validator(
                     response=chunk,
                     request_contents=request_history,
                     response_chunks=chunks,
                 )
+            if full_response:
+                _append_response(full_response, chunk)
+            else:
+                full_response = chunk
             yield chunk
         if not full_response:
             return
@@ -1089,18 +1089,17 @@ async def async_generator():
         full_response = None
         async for chunk in stream:
             chunks.append(chunk)
-            if full_response:
-                _append_response(full_response, chunk)
-            else:
-                full_response = chunk
             # By default we're not adding incomplete interactions to history.
             if self._response_validator is not None:
                 self._response_validator(
                     response=chunk,
                     request_contents=request_history,
                     response_chunks=chunks,
                 )
-
+            if full_response:
+                _append_response(full_response, chunk)
+            else:
+                full_response = chunk
             yield chunk
         if not full_response:
             return
You can’t perform that action at this time.
0 commit comments