Skip to content

Commit 7f7a878

Browse files
authored
Merge pull request #1333 from newrelic/langchain-str-response-fix
Support str response type in LangChain
2 parents 9223f7a + 1eb33f2 commit 7f7a878

File tree

3 files changed

+132
-10
lines changed

3 files changed

+132
-10
lines changed

newrelic/hooks/mlmodel_langchain.py

+12-9
Original file line numberDiff line numberDiff line change
@@ -700,16 +700,19 @@ def _create_successful_chain_run_events(
700700
trace_id = linking_metadata.get("trace.id")
701701
input_message_list = [_input]
702702
output_message_list = []
703-
try:
704-
output_message_list = [response[0]] if response else []
705-
except:
703+
if isinstance(response, str):
704+
output_message_list = [response]
705+
else:
706706
try:
707-
output_message_list = [str(response)]
708-
except Exception as e:
709-
_logger.warning(
710-
"Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s",
711-
traceback.format_exception(*sys.exc_info()),
712-
)
707+
output_message_list = [response[0]] if response else []
708+
except:
709+
try:
710+
output_message_list = [str(response)]
711+
except Exception as e:
712+
_logger.warning(
713+
"Unable to capture response inside langchain chain instrumentation. No response message event will be captured. Report this issue to New Relic Support.\n%s",
714+
traceback.format_exception(*sys.exc_info()),
715+
)
713716

714717
# Make sure the builtin attributes take precedence over metadata attributes.
715718
full_chat_completion_summary_dict = {f"metadata.{key}": value for key, value in metadata.items()}

tests/mlmodel_langchain/_mock_external_openai_server.py

+36
Original file line numberDiff line numberDiff line change
@@ -381,6 +381,42 @@
381381
"system_fingerprint": None,
382382
},
383383
],
384+
"You are a helpful assistant who generates a random first name. A user will pass in a first letter, and you should generate a name that starts with that first letter.": [
385+
{
386+
"Content-Type": "application/json",
387+
"openai-model": "gpt-3.5-turbo-0613",
388+
"openai-organization": "foobar-jtbczk",
389+
"openai-processing-ms": "488",
390+
"openai-version": "2020-10-01",
391+
"x-ratelimit-limit-requests": "200",
392+
"x-ratelimit-limit-tokens": "40000",
393+
"x-ratelimit-limit-tokens_usage_based": "40000",
394+
"x-ratelimit-remaining-requests": "199",
395+
"x-ratelimit-remaining-tokens": "39921",
396+
"x-ratelimit-remaining-tokens_usage_based": "39921",
397+
"x-ratelimit-reset-requests": "7m12s",
398+
"x-ratelimit-reset-tokens": "118ms",
399+
"x-ratelimit-reset-tokens_usage_based": "118ms",
400+
"x-request-id": "f3de99e17ccc360430cffa243b74dcbd",
401+
},
402+
200,
403+
{
404+
"id": "chatcmpl-8XEjOPNHth7yS2jt1You3fEwB6w9i",
405+
"object": "chat.completion",
406+
"created": 1702932142,
407+
"model": "gpt-3.5-turbo-0613",
408+
"choices": [
409+
{
410+
"index": 0,
411+
"message": {"role": "assistant", "content": "Milo"},
412+
"logprobs": None,
413+
"finish_reason": "stop",
414+
}
415+
],
416+
"usage": {"prompt_tokens": 60, "completion_tokens": 9, "total_tokens": 69},
417+
"system_fingerprint": None,
418+
},
419+
],
384420
"9906": [
385421
{
386422
"content-type": "application/json",

tests/mlmodel_langchain/test_chain.py

+84-1
Original file line numberDiff line numberDiff line change
@@ -532,7 +532,7 @@
532532
"ingest_source": "Python",
533533
"is_response": True,
534534
"virtual_llm": True,
535-
"content": "`",
535+
"content": "```html\n<!DOCTYPE html>\n<html>\n<head>\n <title>Math Quiz</title>\n</head>\n<body>\n <h2>Math Quiz Questions</h2>\n <ol>\n <li>What is the result of 5 + 3?</li>\n <ul>\n <li>A) 7</li>\n <li>B) 8</li>\n <li>C) 9</li>\n <li>D) 10</li>\n </ul>\n <li>What is the product of 6 x 7?</li>\n <ul>\n <li>A) 36</li>\n <li>B) 42</li>\n <li>C) 48</li>\n <li>D) 56</li>\n </ul>\n <li>What is the square root of 64?</li>\n <ul>\n <li>A) 6</li>\n <li>B) 7</li>\n <li>C) 8</li>\n <li>D) 9</li>\n </ul>\n <li>What is the result of 12 / 4?</li>\n <ul>\n <li>A) 2</li>\n <li>B) 3</li>\n <li>C) 4</li>\n <li>D) 5</li>\n </ul>\n <li>What is the sum of 15 + 9?</li>\n <ul>\n <li>A) 22</li>\n <li>B) 23</li>\n <li>C) 24</li>\n <li>D) 25</li>\n </ul>\n </ol>\n</body>\n</html>\n```",
536536
},
537537
],
538538
[
@@ -553,6 +553,60 @@
553553
],
554554
]
555555

556+
chat_completion_recorded_events_str_response = [
557+
(
558+
{"type": "LlmChatCompletionSummary"},
559+
{
560+
"id": None,
561+
"llm.conversation_id": "my-awesome-id",
562+
"llm.foo": "bar",
563+
"span_id": None,
564+
"trace_id": "trace-id",
565+
"vendor": "langchain",
566+
"ingest_source": "Python",
567+
"virtual_llm": True,
568+
"request_id": None,
569+
"duration": None,
570+
"response.number_of_messages": 2,
571+
"metadata.id": "123",
572+
},
573+
),
574+
(
575+
{"type": "LlmChatCompletionMessage"},
576+
{
577+
"id": None,
578+
"llm.conversation_id": "my-awesome-id",
579+
"llm.foo": "bar",
580+
"request_id": None,
581+
"span_id": None,
582+
"trace_id": "trace-id",
583+
"content": "{'text': 'M'}",
584+
"completion_id": None,
585+
"sequence": 0,
586+
"vendor": "langchain",
587+
"ingest_source": "Python",
588+
"virtual_llm": True,
589+
},
590+
),
591+
(
592+
{"type": "LlmChatCompletionMessage"},
593+
{
594+
"id": None,
595+
"llm.conversation_id": "my-awesome-id",
596+
"llm.foo": "bar",
597+
"request_id": None,
598+
"span_id": None,
599+
"trace_id": "trace-id",
600+
"content": "Milo",
601+
"completion_id": None,
602+
"sequence": 1,
603+
"vendor": "langchain",
604+
"ingest_source": "Python",
605+
"is_response": True,
606+
"virtual_llm": True,
607+
},
608+
),
609+
]
556610
chat_completion_recorded_events_list_response = [
557611
(
558612
{"type": "LlmChatCompletionSummary"},
@@ -682,6 +736,35 @@
682736
]
683737

684738

739+
@reset_core_stats_engine()
740+
@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_str_response))
741+
@validate_custom_event_count(count=7)
742+
@validate_transaction_metrics(
743+
name="test_chain:test_langchain_chain_str_response",
744+
scoped_metrics=[("Llm/chain/LangChain/invoke", 1)],
745+
rollup_metrics=[("Llm/chain/LangChain/invoke", 1)],
746+
custom_metrics=[(f"Supportability/Python/ML/LangChain/{langchain.__version__}", 1)],
747+
background_task=True,
748+
)
749+
@background_task()
750+
def test_langchain_chain_str_response(set_trace_info, chat_openai_client):
751+
set_trace_info()
752+
add_custom_attribute("llm.conversation_id", "my-awesome-id")
753+
add_custom_attribute("llm.foo", "bar")
754+
add_custom_attribute("non_llm_attr", "python-agent")
755+
756+
template = """You are a helpful assistant who generates a random first name. A user will pass in a first letter, and you should generate a name that starts with that first letter."""
757+
human_template = "{text}"
758+
759+
chat_prompt = langchain_core.prompts.ChatPromptTemplate.from_messages(
760+
[("system", template), ("human", human_template)]
761+
)
762+
str_output_parser = langchain_core.output_parsers.string.StrOutputParser()
763+
chain = chat_prompt | chat_openai_client | str_output_parser
764+
with WithLlmCustomAttributes({"context": "attr"}):
765+
chain.invoke({"text": "M"}, config={"metadata": {"id": "123"}})
766+
767+
685768
@reset_core_stats_engine()
686769
@validate_custom_events(events_with_context_attrs(chat_completion_recorded_events_list_response))
687770
@validate_custom_event_count(count=7)

0 commit comments

Comments (0)