diff --git a/newrelic/core/custom_event.py b/newrelic/core/custom_event.py index 9bf5f75eda..c960a0afa2 100644 --- a/newrelic/core/custom_event.py +++ b/newrelic/core/custom_event.py @@ -141,7 +141,7 @@ def create_custom_event(event_type, params, settings=None, is_ml_event=False): ) return None - intrinsics = {"type": name, "timestamp": int(1000.0 * time.time())} + intrinsics = {"type": name, "timestamp": params.get("timestamp") or int(1000.0 * time.time())} event = [intrinsics, attributes] return event diff --git a/newrelic/hooks/external_aiobotocore.py b/newrelic/hooks/external_aiobotocore.py index 15daa7bd6d..1dbb2f2816 100644 --- a/newrelic/hooks/external_aiobotocore.py +++ b/newrelic/hooks/external_aiobotocore.py @@ -98,6 +98,7 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): response_extractor = getattr(instance, "_nr_response_extractor", None) stream_extractor = getattr(instance, "_nr_stream_extractor", None) response_streaming = getattr(instance, "_nr_response_streaming", False) + request_timestamp = getattr(instance, "_nr_request_timestamp", None) is_converse = getattr(instance, "_nr_is_converse", False) ft = getattr(instance, "_nr_ft", None) @@ -125,6 +126,7 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): transaction, bedrock_args, is_converse, + request_timestamp, ) raise @@ -187,7 +189,9 @@ async def wrap_client__make_api_call(wrapped, instance, args, kwargs): if ft: ft.__exit__(None, None, None) bedrock_attrs["duration"] = ft.duration * 1000 - run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) + run_bedrock_response_extractor( + response_extractor, response_body, bedrock_attrs, is_embedding, transaction, request_timestamp + ) except Exception: _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) diff --git a/newrelic/hooks/external_botocore.py b/newrelic/hooks/external_botocore.py index e00e50b770..d481ce8450 100644 --- a/newrelic/hooks/external_botocore.py +++ b/newrelic/hooks/external_botocore.py @@ -17,6 +17,7 @@ import logging import re import sys +import time import uuid from io import BytesIO @@ -193,6 +194,7 @@ def create_chat_completion_message_event( request_id, llm_metadata_dict, response_id=None, + request_timestamp=None, ): if not transaction: return @@ -227,6 +229,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_message_dict["content"] = content + if request_timestamp: + chat_completion_message_dict["timestamp"] = request_timestamp chat_completion_message_dict.update(llm_metadata_dict) @@ -266,6 +270,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_message_dict["content"] = content + if request_timestamp: + chat_completion_message_dict["timestamp"] = request_timestamp chat_completion_message_dict.update(llm_metadata_dict) @@ -542,10 +548,22 @@ def extract_bedrock_cohere_model_streaming_response(response_body, bedrock_attrs def handle_bedrock_exception( - exc, is_embedding, model, span_id, trace_id, request_extractor, request_body, ft, transaction, kwargs, is_converse + exc, + is_embedding, + model, + span_id, + trace_id, + request_extractor, + request_body, + ft, + transaction, + kwargs, + is_converse, + request_timestamp=None, ): try: bedrock_attrs = {"model": model, "span_id": span_id, "trace_id": trace_id} + if is_converse: try: input_message_list = [ @@ -589,12 +607,14 @@ def handle_bedrock_exception( if 
is_embedding: handle_embedding_event(transaction, error_attributes) else: - handle_chat_completion_event(transaction, error_attributes) + handle_chat_completion_event(transaction, error_attributes, request_timestamp) except Exception: _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True) -def run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction): +def run_bedrock_response_extractor( + response_extractor, response_body, bedrock_attrs, is_embedding, transaction, request_timestamp=None +): # Run response extractor for non-streaming responses try: response_extractor(response_body, bedrock_attrs) @@ -604,7 +624,7 @@ def run_bedrock_response_extractor(response_extractor, response_body, bedrock_at if is_embedding: handle_embedding_event(transaction, bedrock_attrs) else: - handle_chat_completion_event(transaction, bedrock_attrs) + handle_chat_completion_event(transaction, bedrock_attrs, request_timestamp) def run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs): @@ -628,6 +648,8 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): if not settings.ai_monitoring.enabled: return wrapped(*args, **kwargs) + request_timestamp = int(1000.0 * time.time()) + transaction.add_ml_model_info("Bedrock", BOTOCORE_VERSION) transaction._add_agent_attribute("llm", True) @@ -683,6 +705,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): instance._nr_ft = ft instance._nr_response_streaming = response_streaming instance._nr_settings = settings + instance._nr_request_timestamp = request_timestamp # Add a bedrock flag to instance so we can determine when make_api_call instrumentation is hit from non-Bedrock paths and bypass it if so instance._nr_is_bedrock = True @@ -703,6 +726,7 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): transaction, kwargs, is_converse=False, + request_timestamp=request_timestamp, ) raise @@ -733,6 +757,8 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): run_bedrock_request_extractor(request_extractor, request_body, bedrock_attrs) try: + bedrock_attrs.pop("timestamp", None) # The request timestamp is only needed for request extraction + if response_streaming: # Wrap EventStream object here to intercept __iter__ method instead of instrumenting class. # This class is used in numerous other services in botocore, and would cause conflicts. 
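Taken together, the hook changes above all follow one capture-and-thread pattern: record an epoch-millisecond timestamp at wrapper entry, stash it on the instrumented instance as `_nr_request_timestamp`, and pass it down to the event handlers so both success and error events carry the request start time; `create_custom_event` then prefers that supplied value and only falls back to "now" when no timestamp is present. A minimal sketch of that flow, assuming hypothetical stand-ins (`_wrap_call`, `_record_event`, `RECORDED`) for the real wrappers and `record_custom_event` plumbing:

```python
import time

RECORDED = []


def _record_event(attrs):
    # Mirrors the create_custom_event() change: prefer a caller-supplied
    # timestamp, fall back to the moment the event is recorded.
    attrs["timestamp"] = attrs.get("timestamp") or int(1000.0 * time.time())
    RECORDED.append(attrs)


def _wrap_call(wrapped, instance, args, kwargs):
    # Capture the request start time (epoch milliseconds) before calling out.
    request_timestamp = int(1000.0 * time.time())
    # Stash it on the instrumented instance so later hooks (for example the
    # _make_api_call wrapper or a streaming proxy) can pick it up.
    instance._nr_request_timestamp = request_timestamp
    try:
        return_val = wrapped(*args, **kwargs)
    except Exception:
        # Error events reuse the request time rather than the recording time.
        _record_event({"error": True, "timestamp": request_timestamp})
        raise
    _record_event({"timestamp": request_timestamp})
    return return_val
```

Streaming responses follow the same idea, except the `GeneratorProxy`/`AsyncGeneratorProxy` wrappers capture their own `_nr_request_timestamp` at construction and reuse it on the stop-iteration and error paths.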
@@ -748,7 +774,14 @@ def _wrap_bedrock_runtime_invoke_model(wrapped, instance, args, kwargs): bedrock_attrs["duration"] = ft.duration * 1000 response["body"] = StreamingBody(BytesIO(response_body), len(response_body)) - run_bedrock_response_extractor(response_extractor, response_body, bedrock_attrs, is_embedding, transaction) + run_bedrock_response_extractor( + response_extractor, + response_body, + bedrock_attrs, + is_embedding, + transaction, + request_timestamp=request_timestamp, + ) except Exception: _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) @@ -770,6 +803,8 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): if not settings.ai_monitoring.enabled: return wrapped(*args, **kwargs) + request_timestamp = int(1000.0 * time.time()) + transaction.add_ml_model_info("Bedrock", BOTOCORE_VERSION) transaction._add_agent_attribute("llm", True) @@ -800,6 +835,7 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): instance._nr_ft = ft instance._nr_response_streaming = response_streaming instance._nr_settings = settings + instance._nr_request_timestamp = request_timestamp instance._nr_is_converse = True # Add a bedrock flag to instance so we can determine when make_api_call instrumentation is hit from non-Bedrock paths and bypass it if so @@ -810,7 +846,18 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): response = wrapped(*args, **kwargs) except Exception as exc: handle_bedrock_exception( - exc, False, model, span_id, trace_id, request_extractor, {}, ft, transaction, kwargs, is_converse=True + exc, + False, + model, + span_id, + trace_id, + request_extractor, + {}, + ft, + transaction, + kwargs, + is_converse=True, + request_timestamp=request_timestamp, ) raise @@ -824,6 +871,7 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): response_headers = response.get("ResponseMetadata", {}).get("HTTPHeaders") or {} bedrock_attrs = extract_bedrock_converse_attrs(kwargs, response, response_headers, model, span_id, trace_id) + bedrock_attrs["timestamp"] = request_timestamp try: if response_streaming: @@ -838,7 +886,9 @@ def _wrap_bedrock_runtime_converse(wrapped, instance, args, kwargs): ft.__exit__(None, None, None) bedrock_attrs["duration"] = ft.duration * 1000 - run_bedrock_response_extractor(response_extractor, {}, bedrock_attrs, False, transaction) + run_bedrock_response_extractor( + response_extractor, {}, bedrock_attrs, False, transaction, request_timestamp=request_timestamp + ) except Exception: _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) @@ -888,7 +938,7 @@ def extract_bedrock_converse_attrs(kwargs, response, response_headers, model, sp class BedrockRecordEventMixin: - def record_events_on_stop_iteration(self, transaction): + def record_events_on_stop_iteration(self, transaction, request_timestamp=None): if hasattr(self, "_nr_ft"): bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {}) self._nr_ft.__exit__(None, None, None) @@ -899,14 +949,14 @@ def record_events_on_stop_iteration(self, transaction): try: bedrock_attrs["duration"] = self._nr_ft.duration * 1000 - handle_chat_completion_event(transaction, bedrock_attrs) + handle_chat_completion_event(transaction, bedrock_attrs, request_timestamp) except Exception: _logger.warning(RESPONSE_PROCESSING_FAILURE_LOG_MESSAGE, exc_info=True) # Clear cached data as this can be very large. 
self._nr_bedrock_attrs.clear() - def record_error(self, transaction, exc): + def record_error(self, transaction, exc, request_timestamp=None): if hasattr(self, "_nr_ft"): try: ft = self._nr_ft @@ -929,24 +979,24 @@ def record_error(self, transaction, exc): ft.__exit__(*sys.exc_info()) error_attributes["duration"] = ft.duration * 1000 - handle_chat_completion_event(transaction, error_attributes) + handle_chat_completion_event(transaction, error_attributes, request_timestamp) # Clear cached data as this can be very large. error_attributes.clear() except Exception: _logger.warning(EXCEPTION_HANDLING_FAILURE_LOG_MESSAGE, exc_info=True) - def record_stream_chunk(self, event, transaction): + def record_stream_chunk(self, event, transaction, request_timestamp=None): if event: try: if getattr(self, "_nr_is_converse", False): return self.converse_record_stream_chunk(event, transaction) else: - return self.invoke_record_stream_chunk(event, transaction) + return self.invoke_record_stream_chunk(event, transaction, request_timestamp) except Exception: _logger.warning(RESPONSE_EXTRACTOR_FAILURE_LOG_MESSAGE, exc_info=True) - def invoke_record_stream_chunk(self, event, transaction): + def invoke_record_stream_chunk(self, event, transaction, request_timestamp=None): bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {}) chunk = json.loads(event["chunk"]["bytes"].decode("utf-8")) self._nr_model_extractor(chunk, bedrock_attrs) @@ -954,7 +1004,7 @@ def invoke_record_stream_chunk(self, event, transaction): # So we need to call the record events here since stop iteration will not be raised. _type = chunk.get("type") if _type == "content_block_stop": - self.record_events_on_stop_iteration(transaction) + self.record_events_on_stop_iteration(transaction, request_timestamp) def converse_record_stream_chunk(self, event, transaction): bedrock_attrs = getattr(self, "_nr_bedrock_attrs", {}) @@ -984,6 +1034,7 @@ def __iter__(self): class GeneratorProxy(BedrockRecordEventMixin, ObjectProxy): def __init__(self, wrapped): super().__init__(wrapped) + self._nr_request_timestamp = int(1000.0 * time.time()) def __iter__(self): return self @@ -996,12 +1047,12 @@ def __next__(self): return_val = None try: return_val = self.__wrapped__.__next__() - self.record_stream_chunk(return_val, transaction) + self.record_stream_chunk(return_val, transaction, self._nr_request_timestamp) except StopIteration: - self.record_events_on_stop_iteration(transaction) + self.record_events_on_stop_iteration(transaction, self._nr_request_timestamp) raise except Exception as exc: - self.record_error(transaction, exc) + self.record_error(transaction, exc, self._nr_request_timestamp) raise return return_val @@ -1020,6 +1071,10 @@ def __aiter__(self): class AsyncGeneratorProxy(BedrockRecordEventMixin, ObjectProxy): + def __init__(self, wrapped): + super().__init__(wrapped) + self._nr_request_timestamp = int(1000.0 * time.time()) + def __aiter__(self): return self @@ -1030,12 +1085,12 @@ async def __anext__(self): return_val = None try: return_val = await self.__wrapped__.__anext__() - self.record_stream_chunk(return_val, transaction) + self.record_stream_chunk(return_val, transaction, self._nr_request_timestamp) except StopAsyncIteration: - self.record_events_on_stop_iteration(transaction) + self.record_events_on_stop_iteration(transaction, self._nr_request_timestamp) raise except Exception as exc: - self.record_error(transaction, exc) + self.record_error(transaction, exc, self._nr_request_timestamp) raise return return_val @@ -1084,7 +1139,7 @@ def 
handle_embedding_event(transaction, bedrock_attrs): transaction.record_custom_event("LlmEmbedding", embedding_dict) -def handle_chat_completion_event(transaction, bedrock_attrs): +def handle_chat_completion_event(transaction, bedrock_attrs, request_timestamp=None): chat_completion_id = str(uuid.uuid4()) # Grab LLM-related custom attributes off of the transaction to store as metadata on LLM events custom_attrs_dict = transaction._custom_params @@ -1128,6 +1183,7 @@ def handle_chat_completion_event(transaction, bedrock_attrs): "response.number_of_messages": number_of_messages, "response.choices.finish_reason": bedrock_attrs.get("response.choices.finish_reason", None), "error": bedrock_attrs.get("error", None), + "timestamp": request_timestamp or None, } chat_completion_summary_dict.update(llm_metadata_dict) chat_completion_summary_dict = {k: v for k, v in chat_completion_summary_dict.items() if v is not None} @@ -1144,6 +1200,7 @@ def handle_chat_completion_event(transaction, bedrock_attrs): request_id=request_id, llm_metadata_dict=llm_metadata_dict, response_id=response_id, + request_timestamp=request_timestamp, ) diff --git a/newrelic/hooks/mlmodel_gemini.py b/newrelic/hooks/mlmodel_gemini.py index 8aeb1355d0..6fffbebb47 100644 --- a/newrelic/hooks/mlmodel_gemini.py +++ b/newrelic/hooks/mlmodel_gemini.py @@ -14,6 +14,7 @@ import logging import sys +import time import uuid import google @@ -226,6 +227,7 @@ def wrap_generate_content_sync(wrapped, instance, args, kwargs): transaction._add_agent_attribute("llm", True) completion_id = str(uuid.uuid4()) + request_timestamp = int(1000.0 * time.time()) ft = FunctionTrace(name=wrapped.__name__, group="Llm/completion/Gemini") ft.__enter__() @@ -236,12 +238,12 @@ def wrap_generate_content_sync(wrapped, instance, args, kwargs): except Exception as exc: # In error cases, exit the function trace in _record_generation_error before recording the LLM error event so # that the duration is calculated correctly. - _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp) raise ft.__exit__(None, None, None) - _handle_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val) + _handle_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp) return return_val @@ -260,6 +262,7 @@ async def wrap_generate_content_async(wrapped, instance, args, kwargs): transaction._add_agent_attribute("llm", True) completion_id = str(uuid.uuid4()) + request_timestamp = int(1000.0 * time.time()) ft = FunctionTrace(name=wrapped.__name__, group="Llm/completion/Gemini") ft.__enter__() @@ -269,17 +272,17 @@ async def wrap_generate_content_async(wrapped, instance, args, kwargs): except Exception as exc: # In error cases, exit the function trace in _record_generation_error before recording the LLM error event so # that the duration is calculated correctly. 
- _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp) raise ft.__exit__(None, None, None) - _handle_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val) + _handle_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp) return return_val -def _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc): +def _record_generation_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp=None): span_id = linking_metadata.get("span.id") trace_id = linking_metadata.get("trace.id") @@ -339,6 +342,7 @@ def _record_generation_error(transaction, linking_metadata, completion_id, kwarg "ingest_source": "Python", "duration": ft.duration * 1000, "error": True, + "timestamp": request_timestamp, } llm_metadata = _get_llm_attributes(transaction) error_chat_completion_dict.update(llm_metadata) @@ -357,12 +361,15 @@ def _record_generation_error(transaction, linking_metadata, completion_id, kwarg request_model, llm_metadata, output_message_list, + request_timestamp, ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) -def _handle_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val): +def _handle_generation_success( + transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp=None +): if not return_val: return @@ -370,13 +377,17 @@ def _handle_generation_success(transaction, linking_metadata, completion_id, kwa # Response objects are pydantic models so this function call converts the response into a dict response = return_val.model_dump() if hasattr(return_val, "model_dump") else return_val - _record_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, response) + _record_generation_success( + transaction, linking_metadata, completion_id, kwargs, ft, response, request_timestamp + ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) -def _record_generation_success(transaction, linking_metadata, completion_id, kwargs, ft, response): +def _record_generation_success( + transaction, linking_metadata, completion_id, kwargs, ft, response, request_timestamp=None +): span_id = linking_metadata.get("span.id") trace_id = linking_metadata.get("trace.id") try: @@ -436,6 +447,7 @@ def _record_generation_success(transaction, linking_metadata, completion_id, kwa # message This value should be 2 in almost all cases since we will report a summary event for each # separate request (every input and output from the LLM) "response.number_of_messages": 1 + len(output_message_list), + "timestamp": request_timestamp, } llm_metadata = _get_llm_attributes(transaction) @@ -452,6 +464,7 @@ def _record_generation_success(transaction, linking_metadata, completion_id, kwa request_model, llm_metadata, output_message_list, + request_timestamp, ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, exc_info=True) @@ -467,6 +480,7 @@ def create_chat_completion_message_event( request_model, llm_metadata, output_message_list, + request_timestamp=None, ): try: settings = transaction.settings or global_settings() @@ -510,6 +524,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_input_message_dict["content"] = input_message_content + if 
request_timestamp: + chat_completion_input_message_dict["timestamp"] = request_timestamp chat_completion_input_message_dict.update(llm_metadata) @@ -548,6 +564,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_output_message_dict["content"] = message_content + if request_timestamp: + chat_completion_output_message_dict["timestamp"] = request_timestamp chat_completion_output_message_dict.update(llm_metadata) diff --git a/newrelic/hooks/mlmodel_langchain.py b/newrelic/hooks/mlmodel_langchain.py index cfcc031e9d..318e1313a7 100644 --- a/newrelic/hooks/mlmodel_langchain.py +++ b/newrelic/hooks/mlmodel_langchain.py @@ -14,6 +14,7 @@ import logging import sys +import time import traceback import uuid @@ -549,6 +550,7 @@ async def wrap_chain_async_run(wrapped, instance, args, kwargs): transaction._add_agent_attribute("llm", True) run_args = bind_args(wrapped, args, kwargs) + run_args["timestamp"] = int(1000.0 * time.time()) completion_id = str(uuid.uuid4()) add_nr_completion_id(run_args, completion_id) # Check to see if launched from agent or directly from chain. @@ -593,6 +595,7 @@ def wrap_chain_sync_run(wrapped, instance, args, kwargs): transaction._add_agent_attribute("llm", True) run_args = bind_args(wrapped, args, kwargs) + run_args["timestamp"] = int(1000.0 * time.time()) completion_id = str(uuid.uuid4()) add_nr_completion_id(run_args, completion_id) # Check to see if launched from agent or directly from chain. @@ -658,12 +661,21 @@ def _create_error_chain_run_events(transaction, instance, run_args, completion_i "response.number_of_messages": len(input_message_list), "tags": tags, "error": True, + "timestamp": run_args.get("timestamp") or None, } ) full_chat_completion_summary_dict.update(llm_metadata_dict) transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict) create_chat_completion_message_event( - transaction, input_message_list, completion_id, span_id, trace_id, run_id, llm_metadata_dict, [] + transaction, + input_message_list, + completion_id, + span_id, + trace_id, + run_id, + llm_metadata_dict, + [], + run_args["timestamp"] or None, ) @@ -728,8 +740,13 @@ def _create_successful_chain_run_events( "duration": duration, "response.number_of_messages": len(input_message_list) + len(output_message_list), "tags": tags, + "timestamp": run_args.get("timestamp") or None, } ) + + if run_args.get("timestamp"): + full_chat_completion_summary_dict["timestamp"] = run_args.get("timestamp") + full_chat_completion_summary_dict.update(llm_metadata_dict) transaction.record_custom_event("LlmChatCompletionSummary", full_chat_completion_summary_dict) create_chat_completion_message_event( @@ -741,6 +758,7 @@ def _create_successful_chain_run_events( run_id, llm_metadata_dict, output_message_list, + run_args["timestamp"] or None, ) @@ -753,6 +771,7 @@ def create_chat_completion_message_event( run_id, llm_metadata_dict, output_message_list, + request_timestamp=None, ): settings = transaction.settings if transaction.settings is not None else global_settings() @@ -768,9 +787,12 @@ def create_chat_completion_message_event( "vendor": "langchain", "ingest_source": "Python", "virtual_llm": True, + "role": "user", # default role for input messages, overridden by values in llm_metadata_dict } if settings.ai_monitoring.record_content.enabled: chat_completion_input_message_dict["content"] = message + if request_timestamp: + chat_completion_input_message_dict["timestamp"] = request_timestamp 
chat_completion_input_message_dict.update(llm_metadata_dict) transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_input_message_dict) @@ -791,9 +813,12 @@ def create_chat_completion_message_event( "ingest_source": "Python", "is_response": True, "virtual_llm": True, + "role": "assistant", # default role for output messages, overridden by values in llm_metadata_dict } if settings.ai_monitoring.record_content.enabled: chat_completion_output_message_dict["content"] = message + if request_timestamp: + chat_completion_output_message_dict["timestamp"] = request_timestamp chat_completion_output_message_dict.update(llm_metadata_dict) transaction.record_custom_event("LlmChatCompletionMessage", chat_completion_output_message_dict) diff --git a/newrelic/hooks/mlmodel_openai.py b/newrelic/hooks/mlmodel_openai.py index c3f7960b6e..59f7060394 100644 --- a/newrelic/hooks/mlmodel_openai.py +++ b/newrelic/hooks/mlmodel_openai.py @@ -15,6 +15,7 @@ import json import logging import sys +import time import traceback import uuid @@ -84,6 +85,8 @@ def wrap_chat_completion_sync(wrapped, instance, args, kwargs): if (kwargs.get("extra_headers") or {}).get("X-Stainless-Raw-Response") == "stream": return wrapped(*args, **kwargs) + request_timestamp = int(1000.0 * time.time()) + settings = transaction.settings if transaction.settings is not None else global_settings() if not settings.ai_monitoring.enabled: return wrapped(*args, **kwargs) @@ -100,9 +103,10 @@ def wrap_chat_completion_sync(wrapped, instance, args, kwargs): try: return_val = wrapped(*args, **kwargs) except Exception as exc: - _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp) raise - _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val) + + _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp) return return_val @@ -134,6 +138,7 @@ def create_chat_completion_message_event( request_id, llm_metadata, output_message_list, + request_timestamp=None, ): settings = transaction.settings if transaction.settings is not None else global_settings() @@ -168,6 +173,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_input_message_dict["content"] = message_content + if request_timestamp: + chat_completion_input_message_dict["timestamp"] = request_timestamp chat_completion_input_message_dict.update(llm_metadata) @@ -209,6 +216,8 @@ def create_chat_completion_message_event( if settings.ai_monitoring.record_content.enabled: chat_completion_output_message_dict["content"] = message_content + if request_timestamp: + chat_completion_output_message_dict["timestamp"] = request_timestamp chat_completion_output_message_dict.update(llm_metadata) @@ -403,6 +412,8 @@ async def wrap_chat_completion_async(wrapped, instance, args, kwargs): if (kwargs.get("extra_headers") or {}).get("X-Stainless-Raw-Response") == "stream": return await wrapped(*args, **kwargs) + request_timestamp = int(1000.0 * time.time()) + settings = transaction.settings if transaction.settings is not None else global_settings() if not settings.ai_monitoring.enabled: return await wrapped(*args, **kwargs) @@ -419,14 +430,16 @@ async def wrap_chat_completion_async(wrapped, instance, args, kwargs): try: return_val = await wrapped(*args, **kwargs) except Exception as exc: - 
_record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc) + _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp) raise - _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val) + _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp) return return_val -def _handle_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, return_val): +def _handle_completion_success( + transaction, linking_metadata, completion_id, kwargs, ft, return_val, request_timestamp=None +): settings = transaction.settings if transaction.settings is not None else global_settings() stream = kwargs.get("stream", False) # Only if streaming and streaming monitoring is enabled and the response is not empty @@ -469,12 +482,16 @@ def _handle_completion_success(transaction, linking_metadata, completion_id, kwa # openai._legacy_response.LegacyAPIResponse response = json.loads(response.http_response.text.strip()) - _record_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response) + _record_completion_success( + transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response, request_timestamp + ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) -def _record_completion_success(transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response): +def _record_completion_success( + transaction, linking_metadata, completion_id, kwargs, ft, response_headers, response, request_timestamp=None +): span_id = linking_metadata.get("span.id") trace_id = linking_metadata.get("trace.id") try: @@ -552,6 +569,7 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa response_headers, "x-ratelimit-remaining-tokens_usage_based", True ), "response.number_of_messages": len(input_message_list) + len(output_message_list), + "timestamp": request_timestamp, } llm_metadata = _get_llm_attributes(transaction) full_chat_completion_summary_dict.update(llm_metadata) @@ -569,12 +587,13 @@ def _record_completion_success(transaction, linking_metadata, completion_id, kwa request_id, llm_metadata, output_message_list, + request_timestamp, ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) -def _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc): +def _record_completion_error(transaction, linking_metadata, completion_id, kwargs, ft, exc, request_timestamp=None): span_id = linking_metadata.get("span.id") trace_id = linking_metadata.get("trace.id") request_message_list = kwargs.get("messages", None) or [] @@ -635,6 +654,7 @@ def _record_completion_error(transaction, linking_metadata, completion_id, kwarg "response.organization": exc_organization, "duration": ft.duration * 1000, "error": True, + "timestamp": request_timestamp, } llm_metadata = _get_llm_attributes(transaction) error_chat_completion_dict.update(llm_metadata) @@ -655,6 +675,7 @@ def _record_completion_error(transaction, linking_metadata, completion_id, kwarg request_id, llm_metadata, output_message_list, + request_timestamp, ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) @@ -719,6 +740,7 @@ async def wrap_base_client_process_response_async(wrapped, instance, 
args, kwarg class GeneratorProxy(ObjectProxy): def __init__(self, wrapped): super().__init__(wrapped) + self._nr_request_timestamp = int(1000.0 * time.time()) def __iter__(self): return self @@ -733,10 +755,10 @@ def __next__(self): return_val = self.__wrapped__.__next__() _record_stream_chunk(self, return_val) except StopIteration: - _record_events_on_stop_iteration(self, transaction) + _record_events_on_stop_iteration(self, transaction, self._nr_request_timestamp) raise except Exception as exc: - _handle_streaming_completion_error(self, transaction, exc) + _handle_streaming_completion_error(self, transaction, exc, self._nr_request_timestamp) raise return return_val @@ -770,7 +792,7 @@ def _record_stream_chunk(self, return_val): _logger.warning(STREAM_PARSING_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) -def _record_events_on_stop_iteration(self, transaction): +def _record_events_on_stop_iteration(self, transaction, request_timestamp=None): if hasattr(self, "_nr_ft"): # We first check for our saved linking metadata before making a new call to get_trace_linking_metadata # Directly calling get_trace_linking_metadata() causes the incorrect span ID to be captured and associated with the LLM call @@ -787,7 +809,14 @@ def _record_events_on_stop_iteration(self, transaction): completion_id = str(uuid.uuid4()) response_headers = openai_attrs.get("response_headers") or {} _record_completion_success( - transaction, linking_metadata, completion_id, openai_attrs, self._nr_ft, response_headers, None + transaction, + linking_metadata, + completion_id, + openai_attrs, + self._nr_ft, + response_headers, + None, + request_timestamp, ) except Exception: _logger.warning(RECORD_EVENTS_FAILURE_LOG_MESSAGE, traceback.format_exception(*sys.exc_info())) @@ -802,7 +831,7 @@ def _record_events_on_stop_iteration(self, transaction): self._nr_openai_attrs.clear() -def _handle_streaming_completion_error(self, transaction, exc): +def _handle_streaming_completion_error(self, transaction, exc, request_timestamp=None): if hasattr(self, "_nr_ft"): openai_attrs = getattr(self, "_nr_openai_attrs", {}) @@ -812,12 +841,15 @@ def _handle_streaming_completion_error(self, transaction, exc): return linking_metadata = get_trace_linking_metadata() completion_id = str(uuid.uuid4()) - _record_completion_error(transaction, linking_metadata, completion_id, openai_attrs, self._nr_ft, exc) + _record_completion_error( + transaction, linking_metadata, completion_id, openai_attrs, self._nr_ft, exc, request_timestamp + ) class AsyncGeneratorProxy(ObjectProxy): def __init__(self, wrapped): super().__init__(wrapped) + self._nr_request_timestamp = int(1000.0 * time.time()) def __aiter__(self): self._nr_wrapped_iter = self.__wrapped__.__aiter__() @@ -833,10 +865,10 @@ async def __anext__(self): return_val = await self._nr_wrapped_iter.__anext__() _record_stream_chunk(self, return_val) except StopAsyncIteration: - _record_events_on_stop_iteration(self, transaction) + _record_events_on_stop_iteration(self, transaction, self._nr_request_timestamp) raise except Exception as exc: - _handle_streaming_completion_error(self, transaction, exc) + _handle_streaming_completion_error(self, transaction, exc, self._nr_request_timestamp) raise return return_val diff --git a/tests/external_botocore/_test_bedrock_chat_completion_converse.py b/tests/external_botocore/_test_bedrock_chat_completion_converse.py index cdec652292..7cde46faf8 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion_converse.py +++ 
b/tests/external_botocore/_test_bedrock_chat_completion_converse.py @@ -20,6 +20,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -40,6 +41,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", @@ -58,6 +60,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", @@ -76,6 +79,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "c20d345e-6878-4778-b674-6b187bae8ecf", @@ -98,6 +102,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -118,6 +123,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f070b880-e0fb-4537-8093-796671c39239", @@ -136,6 +142,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f070b880-e0fb-4537-8093-796671c39239", @@ -154,6 +161,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f070b880-e0fb-4537-8093-796671c39239", @@ -176,6 +184,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -196,6 +205,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "e1206e19-2318-4a9d-be98-017c73f06118", @@ -216,6 +226,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", @@ -236,6 +247,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, diff --git a/tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py b/tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py index fd970b0603..f72b9fa583 100644 --- a/tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py +++ b/tests/external_botocore/_test_bedrock_chat_completion_invoke_model.py @@ -31,6 +31,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -51,6 +52,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -69,6 +71,7 @@ {"type": 
"LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -90,6 +93,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -110,6 +114,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", @@ -128,6 +133,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", @@ -149,6 +155,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -170,6 +177,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "1234-0", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "228ee63f-4eca-4b7d-b679-bc920de63525", @@ -188,6 +196,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "1234-1", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "228ee63f-4eca-4b7d-b679-bc920de63525", @@ -209,6 +218,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -229,6 +239,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "6a886158-b39f-46ce-b214-97458ab76f2f", @@ -247,6 +258,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "6a886158-b39f-46ce-b214-97458ab76f2f", @@ -268,6 +280,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -288,6 +301,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "ab38295d-df9c-4141-8173-38221651bf46", @@ -306,6 +320,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "ab38295d-df9c-4141-8173-38221651bf46", @@ -327,6 +342,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -348,6 +364,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "12912a17-aa13-45f3-914c-cc82166f3601", @@ -366,6 +383,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "12912a17-aa13-45f3-914c-cc82166f3601", @@ -387,6 +405,7 @@ 
{"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -407,6 +426,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "a168214d-742d-4244-bd7f-62214ffa07df", @@ -425,6 +445,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "a168214d-742d-4244-bd7f-62214ffa07df", @@ -448,6 +469,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -468,6 +490,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -486,6 +509,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -507,6 +531,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -525,6 +550,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", @@ -543,6 +569,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", @@ -564,6 +591,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -581,6 +609,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", @@ -599,6 +628,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", @@ -620,6 +650,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -637,6 +668,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", @@ -655,6 +687,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "e8fc1dd7-3d1e-42c6-9c58-535cae563bff", @@ -676,6 +709,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", 
"span_id": None, @@ -694,6 +728,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", @@ -712,6 +747,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", @@ -735,6 +771,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -755,6 +792,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -773,6 +811,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -794,6 +833,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -812,6 +852,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", @@ -830,6 +871,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "884db5c9-18ab-4f27-8892-33656176a2e6", @@ -851,6 +893,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -869,6 +912,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", @@ -887,6 +931,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1a72a1f6-310f-469c-af1d-2c59eb600089", @@ -908,6 +953,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -926,6 +972,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e", @@ -944,6 +991,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "96c7306d-2d60-4629-83e9-dbd6befb0e4e", @@ -965,6 +1013,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -983,6 +1032,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": 
"my-awesome-id", "llm.foo": "bar", "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", @@ -1001,6 +1051,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "cce6b34c-812c-4f97-8885-515829aa9639", @@ -1025,6 +1076,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1045,6 +1097,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -1063,6 +1116,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -1084,6 +1138,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1104,6 +1159,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "b427270f-371a-458d-81b6-a05aafb2704c", "span_id": None, "trace_id": "trace-id", @@ -1122,6 +1178,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "b427270f-371a-458d-81b6-a05aafb2704c", "span_id": None, "trace_id": "trace-id", @@ -1143,6 +1200,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1163,6 +1221,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "a645548f-0b3a-47ce-a675-f51e6e9037de", "span_id": None, "trace_id": "trace-id", @@ -1181,6 +1240,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "a645548f-0b3a-47ce-a675-f51e6e9037de", "span_id": None, "trace_id": "trace-id", @@ -1202,6 +1262,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1221,6 +1282,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f", @@ -1239,6 +1301,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "1efe6197-80f9-43a6-89a5-bb536c1b822f", @@ -1260,6 +1323,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1281,6 +1345,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", "span_id": None, "trace_id": "trace-id", @@ -1299,6 +1364,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, 
"request_id": "4f8ab6c5-42d1-4e35-9573-30f9f41f821e", "span_id": None, "trace_id": "trace-id", @@ -1320,6 +1386,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1340,6 +1407,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "6dd99878-0919-4f92-850c-48f50f923b76", "span_id": None, "trace_id": "trace-id", @@ -1358,6 +1426,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "request_id": "6dd99878-0919-4f92-850c-48f50f923b76", "span_id": None, "trace_id": "trace-id", @@ -1381,6 +1450,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f4908827-3db9-4742-9103-2bbc34578b03", @@ -1402,6 +1472,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1422,6 +1493,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "48c7ee13-7790-461f-959f-04b0a4cf91c8", @@ -1442,6 +1514,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1462,6 +1535,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "15b39c8b-8e85-42c9-9623-06720301bda3", @@ -1482,6 +1556,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1502,6 +1577,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "9021791d-3797-493d-9277-e33aa6f6d544", @@ -1522,6 +1598,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1542,6 +1619,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "37396f55-b721-4bae-9461-4c369f5a080d", @@ -1562,6 +1640,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1582,6 +1661,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "282ba076-576f-46aa-a2e6-680392132e87", @@ -1602,6 +1682,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1622,6 +1703,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", 
"request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", @@ -1642,6 +1724,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1662,6 +1745,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "22476490-a0d6-42db-b5ea-32d0b8a7f751", @@ -1685,6 +1769,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1705,6 +1790,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1724,6 +1810,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "81508a1c-33a8-4294-8743-f0c629af2f49", @@ -1745,6 +1832,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1764,6 +1852,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", @@ -1785,6 +1874,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1804,6 +1894,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "a5a8cebb-fd33-4437-8168-5667fbdfc1fb", @@ -1826,6 +1917,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -1845,6 +1937,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, diff --git a/tests/mlmodel_gemini/test_text_generation.py b/tests/mlmodel_gemini/test_text_generation.py index faec66aa75..1c789f8197 100644 --- a/tests/mlmodel_gemini/test_text_generation.py +++ b/tests/mlmodel_gemini/test_text_generation.py @@ -37,6 +37,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -56,6 +57,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -73,6 +75,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, diff --git a/tests/mlmodel_gemini/test_text_generation_error.py b/tests/mlmodel_gemini/test_text_generation_error.py index 5e6f1c04de..eb8aec950f 100644 --- a/tests/mlmodel_gemini/test_text_generation_error.py +++ b/tests/mlmodel_gemini/test_text_generation_error.py @@ -42,6 +42,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID 
that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -58,6 +59,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -145,6 +147,7 @@ def _test(): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -162,6 +165,7 @@ def _test(): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -246,6 +250,7 @@ def test_text_generation_invalid_request_error_invalid_model_chat(gemini_dev_cli {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -262,6 +267,7 @@ def test_text_generation_invalid_request_error_invalid_model_chat(gemini_dev_cli {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Invalid API key.", diff --git a/tests/mlmodel_langchain/test_chain.py b/tests/mlmodel_langchain/test_chain.py index a6b7470a9a..2f52f85504 100644 --- a/tests/mlmodel_langchain/test_chain.py +++ b/tests/mlmodel_langchain/test_chain.py @@ -65,6 +65,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -83,6 +84,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -93,6 +95,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -103,6 +106,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -121,6 +125,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -131,6 +136,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -140,6 +146,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -158,6 +165,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -168,6 +176,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -175,6 +184,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -185,6 +195,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -195,6 +206,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -213,6 +225,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -223,6 +236,7 @@ 
"sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -230,6 +244,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -240,6 +255,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -250,6 +266,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -266,6 +283,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -276,6 +294,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -283,6 +302,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -293,6 +313,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -303,6 +324,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -319,6 +341,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -329,6 +352,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -336,6 +360,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -346,6 +371,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -430,6 +456,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -438,6 +465,7 @@ "vendor": "langchain", "ingest_source": "Python", "is_response": True, + "role": "assistant", "virtual_llm": True, "content": "page_content='What is 2 + 4?'", }, @@ -446,6 +474,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "request.model": "gpt-3.5-turbo", @@ -471,6 +500,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -487,6 +517,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -503,6 +534,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -520,6 +552,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -527,6 +560,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, "content": "{'input': 'math', 'context': [Document(id='1234', metadata={}, page_content='What is 2 + 4?')]}", }, @@ -535,6 +569,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -542,6 +577,7 @@ 
"sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, "content": "```html\n\n\n\n Math Quiz\n\n\n

Math Quiz Questions

\n
    \n
  1. What is the result of 5 + 3?
  2. \n \n
  3. What is the product of 6 x 7?
  4. \n \n
  5. What is the square root of 64?
  6. \n \n
  7. What is the result of 12 / 4?
  8. \n \n
  9. What is the sum of 15 + 9?
  10. \n \n
\n\n\n```", @@ -551,6 +587,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "request_id": None, "span_id": None, "trace_id": "trace-id", @@ -558,6 +595,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, "content": "{'input': 'math', 'context': [Document(id='1234', metadata={}, page_content='What is 2 + 4?')], 'answer': '```html\\n\\n\\n\\n Math Quiz\\n\\n\\n

Math Quiz Questions

\\n
    \\n
  1. What is the result of 5 + 3?
  2. \\n \\n
  3. What is the product of 6 x 7?
  4. \\n \\n
  5. What is the square root of 64?
  6. \\n \\n
  7. What is the result of 12 / 4?
  8. \\n \\n
  9. What is the sum of 15 + 9?
  10. \\n \\n
\\n\\n\\n```'}", @@ -570,6 +608,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -587,6 +626,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -597,6 +637,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -604,6 +645,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -614,6 +656,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -624,6 +667,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -641,6 +685,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -651,6 +696,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -658,6 +704,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -668,6 +715,7 @@ "sequence": 1, "vendor": "langchain", "ingest_source": "Python", + "role": "assistant", "is_response": True, "virtual_llm": True, }, @@ -679,6 +727,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -696,6 +745,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -706,6 +756,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), @@ -716,6 +767,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -732,6 +784,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": None, @@ -742,6 +795,7 @@ "sequence": 0, "vendor": "langchain", "ingest_source": "Python", + "role": "user", "virtual_llm": True, }, ), diff --git a/tests/mlmodel_openai/test_chat_completion.py b/tests/mlmodel_openai/test_chat_completion.py index 1f8cf1cb74..89208ab268 100644 --- a/tests/mlmodel_openai/test_chat_completion.py +++ b/tests/mlmodel_openai/test_chat_completion.py @@ -44,6 +44,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -72,6 +73,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-0", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", @@ -90,6 +92,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-1", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", @@ -108,6 +111,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": 
"chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-2", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", diff --git a/tests/mlmodel_openai/test_chat_completion_error.py b/tests/mlmodel_openai/test_chat_completion_error.py index bfb2267a33..79cc79d6db 100644 --- a/tests/mlmodel_openai/test_chat_completion_error.py +++ b/tests/mlmodel_openai/test_chat_completion_error.py @@ -45,6 +45,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -61,6 +62,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -76,6 +78,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -162,6 +165,7 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -179,6 +183,7 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -257,6 +262,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -274,6 +280,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -289,6 +296,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -338,6 +346,7 @@ def test_chat_completion_authentication_error(monkeypatch, set_trace_info): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -354,6 +363,7 @@ def test_chat_completion_authentication_error(monkeypatch, set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Invalid API key.", diff --git a/tests/mlmodel_openai/test_chat_completion_error_v1.py b/tests/mlmodel_openai/test_chat_completion_error_v1.py index 9be9fcab9c..848ad57add 100644 --- a/tests/mlmodel_openai/test_chat_completion_error_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_error_v1.py @@ -44,6 +44,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -60,6 +61,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -75,6 +77,7 @@ {"type": 
"LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -205,6 +208,7 @@ def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, s {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -222,6 +226,7 @@ def test_chat_completion_invalid_request_error_no_model_async_no_content(loop, s {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -369,6 +374,7 @@ def test_chat_completion_invalid_request_error_invalid_model_with_token_count_as {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -385,6 +391,7 @@ def test_chat_completion_invalid_request_error_invalid_model_with_token_count_as {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Invalid API key.", diff --git a/tests/mlmodel_openai/test_chat_completion_stream.py b/tests/mlmodel_openai/test_chat_completion_stream.py index ad89d6f260..55e8e8fbdb 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream.py +++ b/tests/mlmodel_openai/test_chat_completion_stream.py @@ -45,6 +45,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -73,6 +74,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-0", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", @@ -91,6 +93,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-1", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", @@ -109,6 +112,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-87sb95K4EF2nuJRcTs43Tm9ntTemv-2", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "49dbbffbd3c3f4612aa48def69059ccd", diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error.py b/tests/mlmodel_openai/test_chat_completion_stream_error.py index eebb5ee8fb..0fb0d06867 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_error.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_error.py @@ -45,6 +45,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -61,6 +62,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -76,6 +78,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -167,6 +170,7 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ 
-184,6 +188,7 @@ def test_chat_completion_invalid_request_error_no_model_no_content(set_trace_inf {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -266,6 +271,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -283,6 +289,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -298,6 +305,7 @@ def test_chat_completion_invalid_request_error_invalid_model(set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -352,6 +360,7 @@ def test_chat_completion_authentication_error(monkeypatch, set_trace_info): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -368,6 +377,7 @@ def test_chat_completion_authentication_error(monkeypatch, set_trace_info): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Invalid API key.", @@ -626,6 +636,7 @@ def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -643,6 +654,7 @@ def test_chat_completion_wrong_api_key_error_async(loop, monkeypatch, set_trace_ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Stream parsing error.", diff --git a/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py index 5f769ea0e6..5d06dc2a28 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_error_v1.py @@ -45,6 +45,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -61,6 +62,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -76,6 +78,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -219,6 +222,7 @@ async def consumer(): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -236,6 +240,7 @@ async def consumer(): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "llm.conversation_id": "my-awesome-id", "span_id": None, "trace_id": "trace-id", @@ -392,6 +397,7 @@ async def consumer(): {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + 
"timestamp": None, "span_id": None, "trace_id": "trace-id", "duration": None, # Response time varies each test run @@ -408,6 +414,7 @@ async def consumer(): {"type": "LlmChatCompletionMessage"}, { "id": None, + "timestamp": None, "span_id": None, "trace_id": "trace-id", "content": "Invalid API key.", diff --git a/tests/mlmodel_openai/test_chat_completion_stream_v1.py b/tests/mlmodel_openai/test_chat_completion_stream_v1.py index 796404012b..6fc5d58f28 100644 --- a/tests/mlmodel_openai/test_chat_completion_stream_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_stream_v1.py @@ -54,6 +54,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -83,6 +84,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-0", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", @@ -101,6 +103,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-1", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", @@ -119,6 +122,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-8TJ9dS50zgQM7XicE8PLnCyEihRug-2", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "f8d0f53b6881c5c0a3698e55f8f410ac", diff --git a/tests/mlmodel_openai/test_chat_completion_v1.py b/tests/mlmodel_openai/test_chat_completion_v1.py index 817db35d8e..5a6793d955 100644 --- a/tests/mlmodel_openai/test_chat_completion_v1.py +++ b/tests/mlmodel_openai/test_chat_completion_v1.py @@ -43,6 +43,7 @@ {"type": "LlmChatCompletionSummary"}, { "id": None, # UUID that varies with each run + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "span_id": None, @@ -71,6 +72,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-0", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "req_25be7e064e0c590cd65709c85385c796", @@ -89,6 +91,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-1", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "req_25be7e064e0c590cd65709c85385c796", @@ -107,6 +110,7 @@ {"type": "LlmChatCompletionMessage"}, { "id": "chatcmpl-9NPYxI4Zk5ztxNwW5osYdpevgoiBQ-2", + "timestamp": None, "llm.conversation_id": "my-awesome-id", "llm.foo": "bar", "request_id": "req_25be7e064e0c590cd65709c85385c796",