|
| 1 | +from contextvars import copy_context |
1 | 2 | from unittest.mock import patch |
| 3 | +from uuid import uuid4 |
2 | 4 |
|
3 | 5 | import pytest |
4 | 6 | from langchain.messages import HumanMessage |
@@ -166,3 +168,84 @@ def test_chat_model_error_marks_generation_error(langfuse_memory_client, get_spa |
166 | 168 | assert ( |
167 | 169 | "boom" in span.attributes[LangfuseOtelSpanAttributes.OBSERVATION_STATUS_MESSAGE] |
168 | 170 | ) |
| 171 | + |
| 172 | + |
def test_root_chain_metadata_propagates_trace_name(
    langfuse_memory_client, get_span, find_spans
):
    """A ``langfuse_trace_name`` metadata entry on a root chain invocation
    renames the trace on every exported span and is stripped from the root
    span's observation metadata (it is control metadata, not user metadata).
    """
    mocked_result = ChatResult(
        generations=[
            ChatGeneration(
                message=AIMessage(content="knock knock"),
                text="knock knock",
            )
        ],
        llm_output={
            "token_usage": {
                "prompt_tokens": 4,
                "completion_tokens": 2,
                "total_tokens": 6,
            },
            "model_name": "gpt-4o-mini",
        },
    )

    # Patch the model call so no network traffic happens; the handler still
    # sees the normal on_llm_start / on_llm_end callback sequence.
    with patch.object(ChatOpenAI, "_generate", return_value=mocked_result):
        chain = (
            ChatPromptTemplate.from_template("tell me a joke about {topic}")
            | ChatOpenAI(api_key="test", temperature=0)
            | StrOutputParser()
        )
        output = chain.invoke(
            {"topic": "otters"},
            config={
                "callbacks": [CallbackHandler()],
                "metadata": {"langfuse_trace_name": "langchain-trace-name"},
            },
        )
        assert output == "knock knock"

    langfuse_memory_client.flush()

    # The trace name must be applied to the root span and propagate down to
    # the nested generation span.
    root_span = get_span("RunnableSequence")
    for span in (root_span, get_span("ChatOpenAI")):
        assert (
            span.attributes[LangfuseOtelSpanAttributes.TRACE_NAME]
            == "langchain-trace-name"
        )

    # The control key must not leak into the exported observation metadata.
    control_key = (
        f"{LangfuseOtelSpanAttributes.OBSERVATION_METADATA}.langfuse_trace_name"
    )
    assert control_key not in root_span.attributes

    # Exactly one generation span: no duplicate export of the model call.
    assert len(find_spans("ChatOpenAI")) == 1
| 225 | + |
| 226 | + |
def test_root_chain_exports_when_end_runs_in_copied_context(
    langfuse_memory_client, get_span
):
    """The root span must still be finalized and exported when
    ``on_chain_end`` runs in a *copy* of the context that started the run —
    as happens when callbacks are dispatched from async machinery.
    """
    handler = CallbackHandler()
    chain_run_id = uuid4()

    handler.on_chain_start(
        {"id": ["RunnableSequence"]},
        {"topic": "otters"},
        run_id=chain_run_id,
        metadata={"langfuse_trace_name": "async-root-trace"},
    )

    # Finish the run from a context copy so the handler cannot rely on
    # contextvar state written during on_chain_start being visible here.
    ending_context = copy_context()
    ending_context.run(
        handler.on_chain_end,
        {"output": "knock knock"},
        run_id=chain_run_id,
    )

    langfuse_memory_client.flush()

    exported_root = get_span("RunnableSequence")
    assert (
        exported_root.attributes[LangfuseOtelSpanAttributes.TRACE_NAME]
        == "async-root-trace"
    )
0 commit comments