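"""Tests for the Langfuse LangChain ``CallbackHandler``.

These tests rely on shared pytest fixtures defined elsewhere (presumably in a
conftest.py, not shown here): ``langfuse_memory_client`` appears to be a
Langfuse client that exports to an in-memory span store, ``get_span`` and
``find_spans`` look up exported spans by name, and ``json_attr`` JSON-decodes
a span attribute.
"""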
from contextvars import copy_context
from unittest.mock import patch
from uuid import uuid4

import pytest
from langchain_core.messages import AIMessage, HumanMessage
from langchain_core.output_parsers import StrOutputParser
from langchain_core.outputs import ChatGeneration, ChatResult, Generation, LLMResult
from langchain_core.prompts import ChatPromptTemplate
from langchain_openai import ChatOpenAI, OpenAI

from langfuse._client.attributes import LangfuseOtelSpanAttributes
from langfuse.langchain import CallbackHandler


def _assert_parent_child(parent_span, child_span) -> None:
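    """Assert that child_span was exported as a direct child of parent_span."""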
    assert child_span.parent is not None
    assert child_span.parent.span_id == parent_span.context.span_id


def test_chat_model_callback_exports_generation_span(
    langfuse_memory_client, get_span, json_attr
):
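    """A mocked ChatOpenAI call is exported as a generation-type observation
    with input, output, model name, and token usage on the span attributes."""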
    response = ChatResult(
        generations=[
            ChatGeneration(message=AIMessage(content="bonjour"), text="bonjour")
        ],
        llm_output={
            "token_usage": {
                "prompt_tokens": 4,
                "completion_tokens": 2,
                "total_tokens": 6,
            },
            "model_name": "gpt-4o-mini",
        },
    )

    with patch.object(ChatOpenAI, "_generate", return_value=response):
        handler = CallbackHandler()

        with langfuse_memory_client.start_as_current_observation(name="parent"):
            ChatOpenAI(api_key="test", temperature=0).invoke(
                [HumanMessage(content="hello")],
                config={"callbacks": [handler]},
            )

    langfuse_memory_client.flush()

    parent_span = get_span("parent")
    generation_span = get_span("ChatOpenAI")
    _assert_parent_child(parent_span, generation_span)

    assert (
        generation_span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_TYPE]
        == "generation"
    )
    assert json_attr(generation_span, LangfuseOtelSpanAttributes.OBSERVATION_INPUT) == [
        {"role": "user", "content": "hello"}
    ]
    assert json_attr(
        generation_span, LangfuseOtelSpanAttributes.OBSERVATION_OUTPUT
    ) == {
        "role": "assistant",
        "content": "bonjour",
    }
    assert (
        generation_span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_MODEL]
        == "gpt-4o-mini"
    )
    assert json_attr(
        generation_span, LangfuseOtelSpanAttributes.OBSERVATION_USAGE_DETAILS
    ) == {
        "prompt_tokens": 4,
        "completion_tokens": 2,
        "total_tokens": 6,
    }


def test_llm_callback_exports_generation_span(langfuse_memory_client, get_span):
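    """A mocked completion-style OpenAI call honors ``run_name`` and exports
    the output text and model name on the span."""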
    response = LLMResult(
        generations=[[Generation(text="sockzilla")]],
        llm_output={
            "token_usage": {
                "prompt_tokens": 7,
                "completion_tokens": 3,
                "total_tokens": 10,
            },
            "model_name": "gpt-4o-mini-instruct",
        },
    )

    with patch.object(OpenAI, "_generate", return_value=response):
        handler = CallbackHandler()

        with langfuse_memory_client.start_as_current_observation(name="parent"):
            OpenAI(api_key="test", temperature=0).invoke(
                "name a sock company",
                config={"callbacks": [handler], "run_name": "sock-name"},
            )

    langfuse_memory_client.flush()

    span = get_span("sock-name")
    assert span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_OUTPUT] == "sockzilla"
    assert (
        span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_MODEL]
        == "gpt-4o-mini-instruct"
    )


def test_lcel_chain_exports_intermediate_chain_spans(
    langfuse_memory_client, get_span, find_spans
):
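    """Each step of an LCEL chain (prompt | model | parser) is exported as its
    own span, parented under the RunnableSequence span."""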
    response = ChatResult(
        generations=[
            ChatGeneration(
                message=AIMessage(content="knock knock"),
                text="knock knock",
            )
        ],
        llm_output={
            "token_usage": {
                "prompt_tokens": 4,
                "completion_tokens": 2,
                "total_tokens": 6,
            },
            "model_name": "gpt-4o-mini",
        },
    )

    with patch.object(ChatOpenAI, "_generate", return_value=response):
        handler = CallbackHandler()
        prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
        chain = prompt | ChatOpenAI(api_key="test", temperature=0) | StrOutputParser()

        with langfuse_memory_client.start_as_current_observation(name="parent"):
            result = chain.invoke({"topic": "otters"}, config={"callbacks": [handler]})

    assert result == "knock knock"

    langfuse_memory_client.flush()

    sequence_span = get_span("RunnableSequence")
    prompt_span = get_span("ChatPromptTemplate")
    generation_span = get_span("ChatOpenAI")
    parser_span = get_span("StrOutputParser")

    _assert_parent_child(sequence_span, prompt_span)
    _assert_parent_child(sequence_span, generation_span)
    _assert_parent_child(sequence_span, parser_span)

    assert len(find_spans("ChatOpenAI")) == 1


def test_chat_model_error_marks_generation_error(langfuse_memory_client, get_span):
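    """A raising model marks the observation level ERROR and records the
    exception message on the status message attribute."""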
    with patch.object(ChatOpenAI, "_generate", side_effect=RuntimeError("boom")):
        handler = CallbackHandler()

        with langfuse_memory_client.start_as_current_observation(name="parent"):
            with pytest.raises(RuntimeError, match="boom"):
                ChatOpenAI(api_key="test", temperature=0).invoke(
                    [HumanMessage(content="hello")],
                    config={"callbacks": [handler]},
                )

    langfuse_memory_client.flush()

    span = get_span("ChatOpenAI")
    assert span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_LEVEL] == "ERROR"
    assert (
        "boom" in span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_STATUS_MESSAGE]
    )


def test_root_chain_metadata_propagates_trace_name(
    langfuse_memory_client, get_span, find_spans
):
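    """The ``langfuse_trace_name`` metadata key sets the trace name on all
    spans of the trace and is stripped from the exported observation
    metadata."""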
    response = ChatResult(
        generations=[
            ChatGeneration(
                message=AIMessage(content="knock knock"),
                text="knock knock",
            )
        ],
        llm_output={
            "token_usage": {
                "prompt_tokens": 4,
                "completion_tokens": 2,
                "total_tokens": 6,
            },
            "model_name": "gpt-4o-mini",
        },
    )

    with patch.object(ChatOpenAI, "_generate", return_value=response):
        handler = CallbackHandler()
        prompt = ChatPromptTemplate.from_template("tell me a joke about {topic}")
        chain = prompt | ChatOpenAI(api_key="test", temperature=0) | StrOutputParser()

        result = chain.invoke(
            {"topic": "otters"},
            config={
                "callbacks": [handler],
                "metadata": {"langfuse_trace_name": "langchain-trace-name"},
            },
        )

    assert result == "knock knock"

    langfuse_memory_client.flush()

    root_span = get_span("RunnableSequence")
    generation_span = get_span("ChatOpenAI")

    assert (
        root_span.attributes[LangfuseOtelSpanAttributes.TRACE_NAME]
        == "langchain-trace-name"
    )
    assert (
        generation_span.attributes[LangfuseOtelSpanAttributes.TRACE_NAME]
        == "langchain-trace-name"
    )
    assert (
        f"{LangfuseOtelSpanAttributes.OBSERVATION_METADATA}.langfuse_trace_name"
        not in root_span.attributes
    )
    assert len(find_spans("ChatOpenAI")) == 1


def test_root_chain_exports_when_end_runs_in_copied_context(
    langfuse_memory_client, get_span
):
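    """The root span is still ended and exported when ``on_chain_end`` runs in
    a copied context, e.g. when the chain end fires on another execution
    context."""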
    handler = CallbackHandler()
    run_id = uuid4()

    handler.on_chain_start(
        {"id": ["RunnableSequence"]},
        {"topic": "otters"},
        run_id=run_id,
        metadata={"langfuse_trace_name": "async-root-trace"},
    )
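    # Invoke on_chain_end in a copied context so the handler cannot rely on
    # context-local state surviving from on_chain_start.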
    copy_context().run(
        handler.on_chain_end,
        {"output": "knock knock"},
        run_id=run_id,
    )

    langfuse_memory_client.flush()

    root_span = get_span("RunnableSequence")
    assert root_span.attributes[LangfuseOtelSpanAttributes.TRACE_NAME] == (
        "async-root-trace"
    )