|
| 1 | +from typing import Optional, Any, Union, Dict, Mapping |
| 2 | + |
| 3 | +from langfuse.client import ( |
| 4 | + Langfuse, |
| 5 | +) |
| 6 | +from langfuse.model import ModelUsage |
| 7 | + |
| 8 | + |
| 9 | +try: |
| 10 | + from llama_index.core.base.llms.types import ( |
| 11 | + ChatResponse, |
| 12 | + CompletionResponse, |
| 13 | + ) |
| 14 | + from llama_index.core.instrumentation.events import BaseEvent |
| 15 | + from llama_index.core.instrumentation.events.embedding import ( |
| 16 | + EmbeddingStartEvent, |
| 17 | + EmbeddingEndEvent, |
| 18 | + ) |
| 19 | + from llama_index.core.instrumentation.event_handlers import BaseEventHandler |
| 20 | + from llama_index.core.instrumentation.events.llm import ( |
| 21 | + LLMCompletionEndEvent, |
| 22 | + LLMCompletionStartEvent, |
| 23 | + LLMChatEndEvent, |
| 24 | + LLMChatStartEvent, |
| 25 | + ) |
| 26 | + from llama_index.core.utilities.token_counting import TokenCounter |
| 27 | + |
| 28 | +except ImportError: |
| 29 | + raise ModuleNotFoundError( |
| 30 | + "Please install llama-index to use the Langfuse llama-index integration: 'pip install llama-index'" |
| 31 | + ) |
| 32 | + |
| 33 | +from logging import getLogger |
| 34 | + |
| 35 | +logger = getLogger(__name__) |
| 36 | + |
| 37 | + |
class LlamaIndexEventHandler(BaseEventHandler, extra="allow"):
    """Route LlamaIndex instrumentation events into pending Langfuse observation updates.

    Start events contribute model name/parameters; end events contribute token
    usage. Updates are merged into ``observation_updates`` keyed by span id.
    """

    def __init__(
        self,
        *,
        langfuse_client: Langfuse,
        observation_updates: Dict[str, Dict[str, Any]],
    ):
        super().__init__()

        # NOTE(review): BaseEventHandler is a pydantic model declared with
        # extra="allow"; these underscore attributes are set directly on the
        # instance rather than declared as model fields.
        self._langfuse = langfuse_client
        self._observation_updates = observation_updates
        self._token_counter = TokenCounter()

    @classmethod
    def class_name(cls) -> str:
        """Return the handler's class name."""
        return "LlamaIndexEventHandler"

    def handle(self, event: BaseEvent) -> None:
        """Dispatch start/end LLM and embedding events to the matching updater."""
        logger.debug(f"Event {type(event).__name__} received: {event}")

        start_event_types = (
            LLMCompletionStartEvent,
            LLMChatStartEvent,
            EmbeddingStartEvent,
        )
        end_event_types = (
            LLMCompletionEndEvent,
            LLMChatEndEvent,
            EmbeddingEndEvent,
        )

        if isinstance(event, start_event_types):
            self.update_generation_from_start_event(event)
        elif isinstance(event, end_event_types):
            self.update_generation_from_end_event(event)

    def update_generation_from_start_event(
        self,
        event: Union[LLMCompletionStartEvent, LLMChatStartEvent, EmbeddingStartEvent],
    ) -> None:
        """Record the model name and serializable model parameters for the event's span."""
        if event.span_id is None:
            logger.warning("Span ID is not set")
            return

        model_data = event.model_dict
        # Short-circuit: only pop "model_name" when "model" is absent/falsy.
        model = model_data.pop("model", None) or model_data.pop("model_name", None)

        tracked_keys = (
            "max_tokens",
            "max_retries",
            "temperature",
            "timeout",
            "strict",
            "top_logprobs",
            "logprobs",
            "embed_batch_size",
        )
        # Stringify the whitelisted, non-None parameters for tracing.
        traced_model_data = {
            key: str(value)
            for key, value in model_data.items()
            if key in tracked_keys and value is not None
        }

        self._update_observation_updates(
            event.span_id, model=model, model_parameters=traced_model_data
        )

    def update_generation_from_end_event(
        self, event: Union[LLMCompletionEndEvent, LLMChatEndEvent, EmbeddingEndEvent]
    ) -> None:
        """Record token usage for the event's span once generation/embedding ends."""
        if event.span_id is None:
            logger.warning("Span ID is not set")
            return

        usage = None

        if isinstance(event, EmbeddingEndEvent):
            # Embeddings report no provider usage; count tokens locally.
            token_total = sum(
                self._token_counter.get_string_tokens(chunk) for chunk in event.chunks
            )
            usage = {
                "input": 0,
                "output": 0,
                "total": token_total or None,
            }
        elif (
            isinstance(event, (LLMCompletionEndEvent, LLMChatEndEvent))
            and event.response
        ):
            usage = self._parse_token_usage(event.response)

        self._update_observation_updates(event.span_id, usage=usage)

    def _update_observation_updates(self, id_: str, **kwargs) -> None:
        """Merge kwargs into the pending update for span ``id_`` if one exists; else no-op."""
        pending = self._observation_updates.get(id_)
        if pending is not None:
            pending.update(kwargs)

    def _parse_token_usage(
        self, response: Union[ChatResponse, CompletionResponse]
    ) -> Optional[ModelUsage]:
        """Extract token usage from the raw provider payload, else from additional_kwargs."""
        raw = getattr(response, "raw", None)
        if raw and hasattr(raw, "get"):
            usage = raw.get("usage")
            if usage:
                return _parse_usage_from_mapping(usage)

        additional_kwargs = getattr(response, "additional_kwargs", None)
        if additional_kwargs:
            return _parse_usage_from_mapping(additional_kwargs)

        return None
| 142 | + |
| 143 | + |
def _parse_usage_from_mapping(
    usage: Union[object, Mapping[str, Any]],
) -> ModelUsage:
    """Parse token counts from *usage*, dispatching on whether it is a mapping."""
    if not isinstance(usage, Mapping):
        # Fall back to attribute-based extraction for object-style payloads.
        return _parse_usage_from_object(usage)

    return _get_token_counts_from_mapping(usage)
| 151 | + |
| 152 | + |
def _parse_usage_from_object(usage: object) -> ModelUsage:
    """Read OpenAI-style token-count attributes off *usage* into a ModelUsage dict.

    Missing attributes leave the corresponding fields as None; unit and cost
    fields are always None.
    """
    parsed: ModelUsage = {
        "unit": None,
        "input": None,
        "output": None,
        "total": None,
        "input_cost": None,
        "output_cost": None,
        "total_cost": None,
    }

    # Map provider attribute names onto ModelUsage keys.
    for attr_name, usage_key in (
        ("prompt_tokens", "input"),
        ("completion_tokens", "output"),
        ("total_tokens", "total"),
    ):
        count = getattr(usage, attr_name, None)
        if count is not None:
            parsed[usage_key] = count

    return parsed
| 172 | + |
| 173 | + |
def _get_token_counts_from_mapping(
    usage_mapping: Mapping[str, Any],
) -> ModelUsage:
    """Read OpenAI-style token-count keys from *usage_mapping* into a ModelUsage dict.

    Missing keys leave the corresponding fields as None; unit and cost fields
    are always None.
    """
    result: ModelUsage = {
        "unit": None,
        "input": None,
        "output": None,
        "total": None,
        "input_cost": None,
        "output_cost": None,
        "total_cost": None,
    }

    # Map provider mapping keys onto ModelUsage keys.
    for source_key, target_key in (
        ("prompt_tokens", "input"),
        ("completion_tokens", "output"),
        ("total_tokens", "total"),
    ):
        value = usage_mapping.get(source_key)
        if value is not None:
            result[target_key] = value

    return result
0 commit comments