From 538215e1b76e1d6b62a3e83c4711c8263cae7c7a Mon Sep 17 00:00:00 2001 From: Dan Fuller Date: Tue, 1 Jul 2025 15:57:52 -0700 Subject: [PATCH 1/2] feat(uptime): Use EAP to query in `project_uptime_alert_checks_index` Relies on https://github.com/getsentry/sentry/pull/94597 This allows us to switch our queries to use EAP in the check details endpoint --- .../search/eap/uptime_results/attributes.py | 5 + .../project_uptime_alert_checks_index.py | 275 +++++++++---- tests/sentry/uptime/endpoints/__init__.py | 4 +- tests/sentry/uptime/endpoints/test_base.py | 140 +++++++ .../test_organization_uptime_stats.py | 6 +- .../test_project_uptime_alert_check_index.py | 363 +++++++++++------- ...test_organization_events_uptime_results.py | 168 ++------ 7 files changed, 603 insertions(+), 358 deletions(-) create mode 100644 tests/sentry/uptime/endpoints/test_base.py diff --git a/src/sentry/search/eap/uptime_results/attributes.py b/src/sentry/search/eap/uptime_results/attributes.py index c6a91bdcc1f8e5..165f2a13588926 100644 --- a/src/sentry/search/eap/uptime_results/attributes.py +++ b/src/sentry/search/eap/uptime_results/attributes.py @@ -6,6 +6,11 @@ column.public_alias: column for column in COMMON_COLUMNS + [ + ResolvedAttribute( + public_alias="trace_id", + internal_name="trace_id", + search_type="string", + ), ResolvedAttribute( public_alias="guid", internal_name="guid", diff --git a/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py b/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py index 9239bc17256dde..5de6ac5d1aa2f2 100644 --- a/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py +++ b/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py @@ -1,3 +1,5 @@ +import logging +import uuid from datetime import datetime, timezone from typing import Any, cast @@ -15,9 +17,13 @@ ) from sentry_protos.snuba.v1.request_common_pb2 import PageToken, RequestMeta, TraceItemType from sentry_protos.snuba.v1.trace_item_attribute_pb2 import AttributeKey, AttributeValue -from sentry_protos.snuba.v1.trace_item_filter_pb2 import ComparisonFilter, TraceItemFilter +from sentry_protos.snuba.v1.trace_item_filter_pb2 import ( + AndFilter, + ComparisonFilter, + TraceItemFilter, +) -from sentry import options +from sentry import features, options from sentry.api.api_owners import ApiOwner from sentry.api.api_publish_status import ApiPublishStatus from sentry.api.base import region_silo_endpoint @@ -31,6 +37,8 @@ from sentry.uptime.types import EapCheckEntry, IncidentStatus from sentry.utils import snuba_rpc +logger = logging.getLogger(__name__) + @region_silo_endpoint class ProjectUptimeAlertCheckIndexEndpoint(ProjectUptimeAlertEndpoint): @@ -53,10 +61,36 @@ def get( start, end = get_date_range_from_params(request.GET) def data_fn(offset: int, limit: int) -> Any: - rpc_response = self._make_eap_request( - project, uptime_subscription, offset=offset, limit=limit, start=start, end=end - ) - return self._serialize_response(rpc_response, uptime_subscription) + try: + if features.has( + "organizations:uptime-eap-uptime-results-query", project.organization + ): + return self._make_eap_request( + project, + uptime_subscription, + offset, + limit, + start, + end, + TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT, + "subscription_id", + True, + ) + else: + return self._make_eap_request( + project, + uptime_subscription, + offset, + limit, + start, + end, + TraceItemType.TRACE_ITEM_TYPE_UPTIME_CHECK, + "uptime_subscription_id", + False, + ) + except Exception: + logger.exception("Error 
making EAP RPC request for uptime alert checks") + return [] with handle_query_errors(): return self.paginate( @@ -74,7 +108,10 @@ def _make_eap_request( limit: int, start: datetime, end: datetime, - ) -> TraceItemTableResponse: + trace_item_type: TraceItemType.ValueType, + subscription_key: str, + include_request_sequence_filter: bool, + ) -> list[EapCheckEntrySerializerResponse]: maybe_cutoff = self._get_date_cutoff_epoch_seconds() epoch_cutoff = ( datetime.fromtimestamp(maybe_cutoff, tz=timezone.utc) if maybe_cutoff else None @@ -86,32 +123,143 @@ def _make_eap_request( start_timestamp.FromDatetime(start) end_timestamp = Timestamp() end_timestamp.FromDatetime(end) + subscription_filter = TraceItemFilter( + comparison_filter=ComparisonFilter( + key=AttributeKey( + name=subscription_key, + type=AttributeKey.Type.TYPE_STRING, + ), + op=ComparisonFilter.OP_EQUALS, + value=AttributeValue( + val_str=str(uuid.UUID(uptime_subscription.uptime_subscription.subscription_id)) + ), + ) + ) + + if include_request_sequence_filter: + request_sequence_filter = TraceItemFilter( + comparison_filter=ComparisonFilter( + key=AttributeKey( + name="request_sequence", + type=AttributeKey.Type.TYPE_INT, + ), + op=ComparisonFilter.OP_EQUALS, + value=AttributeValue(val_int=0), + ) + ) + query_filter = TraceItemFilter( + and_filter=AndFilter(filters=[subscription_filter, request_sequence_filter]) + ) + else: + query_filter = subscription_filter + rpc_request = TraceItemTableRequest( meta=RequestMeta( referrer="uptime_alert_checks_index", organization_id=project.organization.id, project_ids=[project.id], - trace_item_type=TraceItemType.TRACE_ITEM_TYPE_UPTIME_CHECK, + trace_item_type=trace_item_type, start_timestamp=start_timestamp, end_timestamp=end_timestamp, ), - filter=TraceItemFilter( - comparison_filter=ComparisonFilter( - key=AttributeKey( - name="uptime_subscription_id", - type=AttributeKey.Type.TYPE_STRING, - ), - op=ComparisonFilter.OP_EQUALS, - value=AttributeValue( - val_str=str(uptime_subscription.uptime_subscription.subscription_id) + filter=query_filter, + columns=self._get_columns_for_trace_item_type(trace_item_type), + order_by=[ + TraceItemTableRequest.OrderBy( + column=Column( + label="timestamp", + key=AttributeKey( + name=( + "sentry.timestamp" + if trace_item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT + else "timestamp" + ), + type=AttributeKey.Type.TYPE_DOUBLE, + ), ), + descending=True, ) + ], + limit=limit, + page_token=PageToken(offset=offset), + ) + + rpc_response = snuba_rpc.table_rpc([rpc_request])[0] + return self._serialize_response(rpc_response, uptime_subscription, trace_item_type) + + def _get_columns_for_trace_item_type( + self, trace_item_type: TraceItemType.ValueType + ) -> list[Column]: + """Get appropriate columns based on trace item type.""" + common_columns = [ + Column( + label="environment", + key=AttributeKey(name="environment", type=AttributeKey.Type.TYPE_STRING), ), - columns=[ + Column( + label="timestamp", + key=AttributeKey( + name=( + "sentry.timestamp" + if trace_item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT + else "timestamp" + ), + type=AttributeKey.Type.TYPE_DOUBLE, + ), + ), + Column( + label="region", + key=AttributeKey(name="region", type=AttributeKey.Type.TYPE_STRING), + ), + Column( + label="check_status", + key=AttributeKey(name="check_status", type=AttributeKey.Type.TYPE_STRING), + ), + Column( + label="http_status_code", + key=AttributeKey(name="http_status_code", type=AttributeKey.Type.TYPE_INT), + ), + Column( + 
label="incident_status", + key=AttributeKey(name="incident_status", type=AttributeKey.Type.TYPE_INT), + ), + Column( + label="trace_id", + key=AttributeKey(name="trace_id", type=AttributeKey.Type.TYPE_STRING), + ), + ] + + if trace_item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT: + return common_columns + [ Column( - label="environment", - key=AttributeKey(name="environment", type=AttributeKey.Type.TYPE_STRING), + label="subscription_id", + key=AttributeKey(name="subscription_id", type=AttributeKey.Type.TYPE_STRING), ), + Column( + label="check_id", + key=AttributeKey(name="check_id", type=AttributeKey.Type.TYPE_STRING), + ), + Column( + label="scheduled_check_time_us", + key=AttributeKey( + name="scheduled_check_time_us", type=AttributeKey.Type.TYPE_INT + ), + ), + Column( + label="check_duration_us", + key=AttributeKey(name="check_duration_us", type=AttributeKey.Type.TYPE_INT), + ), + Column( + label="check_status_reason", + key=AttributeKey(name="status_reason_type", type=AttributeKey.Type.TYPE_STRING), + ), + Column( + label="guid", + key=AttributeKey(name="guid", type=AttributeKey.Type.TYPE_STRING), + ), + ] + else: + return common_columns + [ Column( label="uptime_subscription_id", key=AttributeKey( @@ -128,61 +276,23 @@ def _make_eap_request( name="scheduled_check_time", type=AttributeKey.Type.TYPE_DOUBLE ), ), - Column( - label="timestamp", - key=AttributeKey(name="timestamp", type=AttributeKey.Type.TYPE_DOUBLE), - ), Column( label="duration_ms", key=AttributeKey(name="duration_ms", type=AttributeKey.Type.TYPE_INT), ), - Column( - label="region", - key=AttributeKey(name="region", type=AttributeKey.Type.TYPE_STRING), - ), - Column( - label="check_status", - key=AttributeKey(name="check_status", type=AttributeKey.Type.TYPE_STRING), - ), Column( label="check_status_reason", key=AttributeKey( name="check_status_reason", type=AttributeKey.Type.TYPE_STRING ), ), - Column( - label="trace_id", - key=AttributeKey(name="trace_id", type=AttributeKey.Type.TYPE_STRING), - ), - Column( - label="http_status_code", - key=AttributeKey(name="http_status_code", type=AttributeKey.Type.TYPE_INT), - ), - Column( - label="incident_status", - key=AttributeKey(name="incident_status", type=AttributeKey.Type.TYPE_INT), - ), - ], - order_by=[ - TraceItemTableRequest.OrderBy( - column=Column( - label="timestamp", - key=AttributeKey(name="timestamp", type=AttributeKey.Type.TYPE_INT), - ), - descending=True, - ) - ], - limit=limit, - page_token=PageToken(offset=offset), - ) - - rpc_response = snuba_rpc.table_rpc([rpc_request])[0] - return rpc_response + ] def _serialize_response( self, rpc_response: TraceItemTableResponse, uptime_subscription: ProjectUptimeSubscription, + trace_item_type: TraceItemType.ValueType, ) -> list[EapCheckEntrySerializerResponse]: """ Serialize the response from the EAP into a list of items per each uptime check. 
@@ -193,7 +303,9 @@ def _serialize_response( column_names = [cv.attribute_name for cv in column_values] entries: list[EapCheckEntry] = [ - self._transform_row(row_idx, column_values, column_names, uptime_subscription) + self._transform_row( + row_idx, column_values, column_names, uptime_subscription, trace_item_type + ) for row_idx in range(len(column_values[0].results)) ] @@ -205,37 +317,58 @@ def _transform_row( column_values: Any, column_names: list[str], uptime_subscription: ProjectUptimeSubscription, + trace_item_type: TraceItemType.ValueType, ) -> EapCheckEntry: row_dict: dict[str, AttributeValue] = { col_name: column_values[col_idx].results[row_idx] for col_idx, col_name in enumerate(column_names) } + if trace_item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT: + uptime_check_id = row_dict["check_id"].val_str + scheduled_check_time = datetime.fromtimestamp( + row_dict.get("scheduled_check_time_us").val_int / 1_000_000 + ) + duration_val = row_dict.get("check_duration_us") + duration_ms = ( + (duration_val.val_int // 1000) if duration_val and not duration_val.is_null else 0 + ) + else: + uptime_check_id = row_dict["uptime_check_id"].val_str + scheduled_check_time = datetime.fromtimestamp( + row_dict["scheduled_check_time"].val_double + ) + duration_ms = row_dict["duration_ms"].val_int return EapCheckEntry( - uptime_check_id=row_dict["uptime_check_id"].val_str, + uptime_check_id=uptime_check_id, uptime_subscription_id=uptime_subscription.id, timestamp=datetime.fromtimestamp(row_dict["timestamp"].val_double), - scheduled_check_time=datetime.fromtimestamp( - row_dict["scheduled_check_time"].val_double - ), + scheduled_check_time=scheduled_check_time, check_status=cast(CheckStatus, row_dict["check_status"].val_str), - check_status_reason=( - None - if row_dict["check_status_reason"].val_str == "" - else cast(CheckStatusReasonType, row_dict["check_status_reason"].val_str) + check_status_reason=self._extract_check_status_reason( + row_dict.get("check_status_reason") ), http_status_code=( None if row_dict["http_status_code"].is_null else row_dict["http_status_code"].val_int ), - duration_ms=row_dict["duration_ms"].val_int, + duration_ms=duration_ms, trace_id=row_dict["trace_id"].val_str, incident_status=IncidentStatus(row_dict["incident_status"].val_int), - environment=row_dict["environment"].val_str, + environment=row_dict.get("environment", AttributeValue(val_str="")).val_str, region=row_dict["region"].val_str, ) + def _extract_check_status_reason( + self, check_status_reason_val: AttributeValue | None + ) -> CheckStatusReasonType | None: + """Extract check status reason from attribute value, handling null/empty cases.""" + if not check_status_reason_val or check_status_reason_val.is_null: + return None + val_str = check_status_reason_val.val_str + return cast(CheckStatusReasonType, val_str) if val_str != "" else None + def _get_date_cutoff_epoch_seconds(self) -> float | None: value = float(options.get("uptime.date_cutoff_epoch_seconds")) return None if value == 0 else value diff --git a/tests/sentry/uptime/endpoints/__init__.py b/tests/sentry/uptime/endpoints/__init__.py index 9f5efb219fb47a..54e05110b0769b 100644 --- a/tests/sentry/uptime/endpoints/__init__.py +++ b/tests/sentry/uptime/endpoints/__init__.py @@ -1,7 +1,7 @@ -from sentry.testutils.cases import APITestCase, UptimeTestCaseMixin +from sentry.testutils.cases import APITestCase -class UptimeAlertBaseEndpointTest(UptimeTestCaseMixin, APITestCase): +class UptimeAlertBaseEndpointTest(APITestCase): def setUp(self): 
super().setUp() self.login_as(user=self.user) diff --git a/tests/sentry/uptime/endpoints/test_base.py b/tests/sentry/uptime/endpoints/test_base.py new file mode 100644 index 00000000000000..dbe39bec4dca89 --- /dev/null +++ b/tests/sentry/uptime/endpoints/test_base.py @@ -0,0 +1,140 @@ +from datetime import datetime, timedelta, timezone +from uuid import uuid4 + +import pytest +import requests +from django.conf import settings +from google.protobuf.timestamp_pb2 import Timestamp +from sentry_protos.snuba.v1.request_common_pb2 import TraceItemType +from sentry_protos.snuba.v1.trace_item_pb2 import AnyValue, TraceItem + +from sentry.testutils.cases import BaseTestCase, scalar_to_any_value +from sentry.testutils.skips import requires_snuba + +MOCK_DATETIME = datetime.now(tz=timezone.utc) - timedelta(days=1) + + +@pytest.mark.snuba +@requires_snuba +@pytest.mark.usefixtures("reset_snuba") +class UptimeResultEAPTestCase(BaseTestCase): + """Test case for creating and storing EAP uptime results.""" + + def create_eap_uptime_result( + self, + *, + organization=None, + project=None, + scheduled_check_time=None, + trace_id=None, + guid=None, + subscription_id=None, + check_id=None, + check_status="success", + incident_status=None, + region="default", + http_status_code=200, + request_type="GET", + request_url="https://example.com", + request_sequence=0, + check_duration_us=150000, + request_duration_us=125000, + dns_lookup_duration_us=None, + tcp_connection_duration_us=None, + tls_handshake_duration_us=None, + time_to_first_byte_duration_us=None, + send_request_duration_us=None, + receive_response_duration_us=None, + request_body_size_bytes=0, + response_body_size_bytes=1024, + status_reason_type=None, + status_reason_description=None, + ) -> TraceItem: + if organization is None: + organization = self.organization + if project is None: + project = self.project + if scheduled_check_time is None: + scheduled_check_time = datetime.now(timezone.utc) - timedelta(minutes=1) + if trace_id is None: + trace_id = uuid4().hex + if guid is None: + guid = uuid4().hex + if subscription_id is None: + subscription_id = f"sub-{uuid4().hex[:8]}" + + attributes_data = { + "guid": guid, + "subscription_id": subscription_id, + "check_status": check_status, + "region": region, + "http_status_code": http_status_code, + "request_type": request_type, + "request_url": request_url, + "request_sequence": request_sequence, + "check_duration_us": check_duration_us, + "request_duration_us": request_duration_us, + "request_body_size_bytes": request_body_size_bytes, + "response_body_size_bytes": response_body_size_bytes, + } + + if check_id is not None: + attributes_data["check_id"] = check_id + + timing_fields = { + "dns_lookup_duration_us": dns_lookup_duration_us, + "tcp_connection_duration_us": tcp_connection_duration_us, + "tls_handshake_duration_us": tls_handshake_duration_us, + "time_to_first_byte_duration_us": time_to_first_byte_duration_us, + "send_request_duration_us": send_request_duration_us, + "receive_response_duration_us": receive_response_duration_us, + } + for field, value in timing_fields.items(): + if value is not None: + attributes_data[field] = value + + if status_reason_type is not None: + attributes_data["status_reason_type"] = status_reason_type + if status_reason_description is not None: + attributes_data["status_reason_description"] = status_reason_description + + if incident_status is not None: + attributes_data["incident_status"] = incident_status.value + + attributes_proto = {} + for k, v in 
attributes_data.items(): + if v is not None: + attributes_proto[k] = scalar_to_any_value(v) + + timestamp_proto = Timestamp() + timestamp_proto.FromDatetime(scheduled_check_time) + + attributes_proto["scheduled_check_time_us"] = AnyValue( + int_value=int(scheduled_check_time.timestamp() * 1_000_000) + ) + attributes_proto["actual_check_time_us"] = AnyValue( + int_value=int(scheduled_check_time.timestamp() * 1_000_000) + 5000 + ) + + return TraceItem( + organization_id=organization.id, + project_id=project.id, + item_type=TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT, + timestamp=timestamp_proto, + trace_id=trace_id, + item_id=uuid4().bytes, + received=timestamp_proto, + retention_days=90, + attributes=attributes_proto, + ) + + def store_uptime_results(self, uptime_results): + """Store uptime results in the EAP dataset.""" + files = { + f"uptime_{i}": result.SerializeToString() for i, result in enumerate(uptime_results) + } + response = requests.post( + settings.SENTRY_SNUBA + "/tests/entities/eap_items/insert_bytes", + files=files, + ) + assert response.status_code == 200 diff --git a/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py b/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py index 21f298ee847d78..2ab76b007792cf 100644 --- a/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py +++ b/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py @@ -7,9 +7,7 @@ from sentry.uptime.endpoints.organization_uptime_stats import add_extra_buckets_for_epoch_cutoff from sentry.uptime.types import IncidentStatus from sentry.utils import json -from tests.snuba.api.endpoints.test_organization_events_uptime_results import ( - UptimeResultEAPTestCase, -) +from tests.sentry.uptime.endpoints.test_base import UptimeResultEAPTestCase MOCK_DATETIME = datetime.now(tz=timezone.utc) - timedelta(days=1) @@ -331,7 +329,7 @@ def store_uptime_data( incident_status=IncidentStatus.NO_INCIDENT, scheduled_check_time=None, ): - uptime_result = self.create_uptime_result( + uptime_result = self.create_eap_uptime_result( subscription_id=str(uuid.UUID(subscription_id)), guid=str(uuid.UUID(subscription_id)), request_url="https://santry.io", diff --git a/tests/sentry/uptime/endpoints/test_project_uptime_alert_check_index.py b/tests/sentry/uptime/endpoints/test_project_uptime_alert_check_index.py index 0b3769dbcbe0a1..3532d1caccf722 100644 --- a/tests/sentry/uptime/endpoints/test_project_uptime_alert_check_index.py +++ b/tests/sentry/uptime/endpoints/test_project_uptime_alert_check_index.py @@ -1,29 +1,24 @@ import uuid -from datetime import datetime, timedelta, timezone +from abc import abstractmethod +from datetime import datetime, timedelta from sentry.testutils.cases import UptimeCheckSnubaTestCase -from sentry.testutils.helpers.datetime import freeze_time +from sentry.testutils.helpers.datetime import before_now, freeze_time from sentry.testutils.helpers.options import override_options from sentry.testutils.silo import region_silo_test from sentry.uptime.types import IncidentStatus from sentry.utils.cursors import Cursor -from tests.sentry.uptime.endpoints.test_organization_uptime_alert_index import ( - OrganizationUptimeAlertIndexBaseEndpointTest, -) +from tests.sentry.uptime.endpoints import UptimeAlertBaseEndpointTest +from tests.sentry.uptime.endpoints.test_base import MOCK_DATETIME, UptimeResultEAPTestCase -MOCK_DATETIME = datetime.now(tz=timezone.utc) - timedelta(days=1) - -@region_silo_test -@freeze_time(MOCK_DATETIME) -class ProjectUptimeAlertCheckIndexEndpoint( - 
OrganizationUptimeAlertIndexBaseEndpointTest, UptimeCheckSnubaTestCase -): +class ProjectUptimeAlertCheckIndexBaseTest(UptimeAlertBaseEndpointTest): + __test__ = False endpoint = "sentry-api-0-project-uptime-alert-checks" + features: dict[str, bool] = {} def setUp(self): super().setUp() - self.login_as(user=self.user) self.subscription_id = uuid.uuid4().hex self.subscription = self.create_uptime_subscription( url="https://santry.io", subscription_id=self.subscription_id @@ -32,141 +27,239 @@ def setUp(self): uptime_subscription=self.subscription ) - self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success") - self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="failure") - self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success") - self.store_snuba_uptime_check( - subscription_id=self.subscription_id, - check_status="failure", - http_status=None, - ) - self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success") - self.store_snuba_uptime_check( - subscription_id=self.subscription_id, - check_status="failure", - incident_status=IncidentStatus.IN_INCIDENT, - ) + test_scenarios: list[dict] = [ + {"check_status": "success", "scheduled_check_time": before_now(minutes=10)}, + {"check_status": "failure", "scheduled_check_time": before_now(minutes=9)}, + {"check_status": "success", "scheduled_check_time": before_now(minutes=8)}, + { + "check_status": "failure", + "http_status": None, + "scheduled_check_time": before_now(minutes=7), + }, + {"check_status": "success", "scheduled_check_time": before_now(minutes=6)}, + { + "check_status": "failure", + "incident_status": IncidentStatus.IN_INCIDENT, + "scheduled_check_time": before_now(minutes=5), + }, + ] + + for scenario in test_scenarios: + kwargs = { + "incident_status": scenario.get("incident_status", IncidentStatus.NO_INCIDENT), + "scheduled_check_time": scenario["scheduled_check_time"], + } + if "http_status" in scenario: + kwargs["http_status"] = scenario["http_status"] + + self.store_uptime_data(self.subscription_id, scenario["check_status"], **kwargs) + + @abstractmethod + def store_uptime_data( + self, + subscription_id: str, + check_status: str, + incident_status: IncidentStatus = IncidentStatus.NO_INCIDENT, + scheduled_check_time: datetime | None = None, + http_status: int | None = None, + ) -> None: + """Store a single uptime data row. 
Must be implemented by subclasses.""" + raise NotImplementedError("Subclasses must implement store_uptime_data") def test_get(self): - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - ) - assert response.data is not None - assert len(response.data) == 6 - most_recent = response.data[0] - for key in [ - "uptimeSubscriptionId", - "uptimeCheckId", - "scheduledCheckTime", - "timestamp", - "durationMs", - "region", - "regionName", - "checkStatus", - "checkStatusReason", - "traceId", - "httpStatusCode", - "incidentStatus", - ]: - assert key in most_recent, f"{key} not in {most_recent}" - - assert most_recent["uptimeCheckId"] - assert most_recent["uptimeSubscriptionId"] == self.project_uptime_subscription.id - assert most_recent["regionName"] == "Default Region" - assert most_recent["checkStatusReason"] == "failure" - - assert any(v for v in response.data if v["checkStatus"] == "failure_incident") - assert any(v for v in response.data if v["checkStatusReason"] is None) - assert any(v for v in response.data if v["httpStatusCode"] is None) + with self.feature(self.features): + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + ) + assert response.data is not None + assert len(response.data) == 6 + most_recent = response.data[0] + for key in [ + "uptimeSubscriptionId", + "uptimeCheckId", + "scheduledCheckTime", + "timestamp", + "durationMs", + "region", + "regionName", + "checkStatus", + "checkStatusReason", + "traceId", + "httpStatusCode", + "incidentStatus", + ]: + assert key in most_recent, f"{key} not in {most_recent}" + + assert most_recent["uptimeCheckId"] + assert most_recent["uptimeSubscriptionId"] == self.project_uptime_subscription.id + assert most_recent["regionName"] == "Default Region" + assert most_recent["checkStatusReason"] == "failure" + + assert any(v for v in response.data if v["checkStatus"] == "failure_incident") + assert any(v for v in response.data if v["checkStatusReason"] is None) + assert any(v for v in response.data if v["httpStatusCode"] is None) def test_datetime_range(self): - # all of our checks are stored in the last 5 minutes, so query for 10 days ago and expect 0 results - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={ - "start": datetime.now() - timedelta(days=10), - "end": datetime.now() - timedelta(days=9), - }, - ) - assert len(response.data) == 0 - # query for the last 3 days and expect 6 results - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={ - "start": datetime.now() - timedelta(days=3), - "end": datetime.now(), - }, - ) - assert len(response.data) == 6 + with self.feature(self.features): + # all of our checks are stored in the last 5 minutes, so query for 10 days ago and expect 0 results + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={ + "start": datetime.now() - timedelta(days=10), + "end": datetime.now() - timedelta(days=9), + }, + ) + assert len(response.data) == 0 + # query for the last 3 days and expect 6 results + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={ + "start": datetime.now() - timedelta(days=3), + "end": datetime.now(), + }, + ) + 
assert len(response.data) == 6 # TODO: fix this test once snuba is fixed def test_get_paginated(self): - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={"cursor": Cursor(0, 0), "per_page": 2}, - ) - assert response.data is not None - assert len(response.data) == 2 - - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={"cursor": Cursor(0, 2), "per_page": 2}, - ) - assert response.data is not None - assert len(response.data) == 2 - - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={"cursor": Cursor(0, 4), "per_page": 2}, - ) - assert response.data is not None - assert len(response.data) == 2 - - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - qs_params={"cursor": Cursor(0, 20), "per_page": 2}, - ) - assert response.data is not None - assert len(response.data) == 0 + with self.feature(self.features): + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={"cursor": Cursor(0, 0), "per_page": 2}, + ) + assert response.data is not None + assert len(response.data) == 2 + + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={"cursor": Cursor(0, 2), "per_page": 2}, + ) + assert response.data is not None + assert len(response.data) == 2 + + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={"cursor": Cursor(0, 4), "per_page": 2}, + ) + assert response.data is not None + assert len(response.data) == 2 + + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + qs_params={"cursor": Cursor(0, 20), "per_page": 2}, + ) + assert response.data is not None + assert len(response.data) == 0 @override_options( {"uptime.date_cutoff_epoch_seconds": (MOCK_DATETIME - timedelta(seconds=1)).timestamp()} ) def test_get_with_date_cutoff(self): - response = self.get_success_response( - self.organization.slug, - self.project.slug, - self.project_uptime_subscription.id, - ) - assert response.data is not None - assert len(response.data) == 0 + with self.feature(self.features): + response = self.get_success_response( + self.organization.slug, + self.project.slug, + self.project_uptime_subscription.id, + ) + assert response.data is not None + assert len(response.data) == 0 def test_get_with_none_subscription_id(self): - # Create a subscription with None subscription_id - subscription = self.create_uptime_subscription( - url="https://example.com", subscription_id=None - ) - project_uptime_subscription = self.create_project_uptime_subscription( - uptime_subscription=subscription - ) + with self.feature(self.features): + # Create a subscription with None subscription_id + subscription = self.create_uptime_subscription( + url="https://example.com", subscription_id=None + ) + project_uptime_subscription = self.create_project_uptime_subscription( + uptime_subscription=subscription + ) + + response = self.get_success_response( + self.organization.slug, + self.project.slug, + project_uptime_subscription.id, + ) + assert response.data == [] + + +@region_silo_test 
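+# Legacy (non-EAP) variant of the shared test cases: both EAP feature flags are
+# disabled and check data is written via store_snuba_uptime_check.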
+@freeze_time(MOCK_DATETIME) +class ProjectUptimeAlertCheckIndexEndpoint( + ProjectUptimeAlertCheckIndexBaseTest, UptimeCheckSnubaTestCase +): + __test__ = True + features = { + "organizations:uptime-eap-enabled": False, + "organizations:uptime-eap-uptime-results-query": False, + } - response = self.get_success_response( - self.organization.slug, - self.project.slug, - project_uptime_subscription.id, + def store_uptime_data( + self, + subscription_id, + check_status, + incident_status=IncidentStatus.NO_INCIDENT, + scheduled_check_time=None, + http_status=None, + ): + # if scheduled_check_time is None: + # scheduled_check_time = datetime.now(timezone.utc) - timedelta(hours=12) + # + self.store_snuba_uptime_check( + subscription_id=subscription_id, + check_status=check_status, + incident_status=incident_status, + scheduled_check_time=scheduled_check_time, + http_status=http_status, + region="default", ) - assert response.data == [] + + +@region_silo_test +@freeze_time(MOCK_DATETIME) +class ProjectUptimeAlertCheckIndexEndpointWithEAPTests( + ProjectUptimeAlertCheckIndexBaseTest, UptimeResultEAPTestCase +): + __test__ = True + + def setUp(self): + self.features = { + "organizations:uptime-eap-enabled": True, + "organizations:uptime-eap-uptime-results-query": True, + } + super().setUp() + + def store_uptime_data( + self, + subscription_id, + check_status, + incident_status=IncidentStatus.NO_INCIDENT, + scheduled_check_time=None, + http_status=None, + ): + create_params = { + "subscription_id": str(uuid.UUID(subscription_id)), + "guid": str(uuid.UUID(subscription_id)), + "check_id": str(uuid.uuid4()), + "check_status": check_status, + "incident_status": incident_status, + "scheduled_check_time": scheduled_check_time, + "status_reason_type": "failure" if check_status == "failure" else None, + "region": "default", + "http_status_code": http_status, + } + uptime_result = self.create_eap_uptime_result(**create_params) + self.store_uptime_results([uptime_result]) diff --git a/tests/snuba/api/endpoints/test_organization_events_uptime_results.py b/tests/snuba/api/endpoints/test_organization_events_uptime_results.py index ab8f83727f7002..8f6dc7ee53374d 100644 --- a/tests/snuba/api/endpoints/test_organization_events_uptime_results.py +++ b/tests/snuba/api/endpoints/test_organization_events_uptime_results.py @@ -1,136 +1,12 @@ -from datetime import datetime, timedelta, timezone +from datetime import timedelta from uuid import uuid4 import pytest -import requests -from django.conf import settings -from google.protobuf.timestamp_pb2 import Timestamp -from sentry_protos.snuba.v1.request_common_pb2 import TraceItemType -from sentry_protos.snuba.v1.trace_item_pb2 import AnyValue, TraceItem -from sentry.testutils.cases import BaseTestCase, scalar_to_any_value +from tests.sentry.uptime.endpoints.test_base import UptimeResultEAPTestCase from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase -class UptimeResultEAPTestCase(BaseTestCase): - def create_uptime_result( - self, - *, - organization=None, - project=None, - scheduled_check_time=None, - trace_id=None, - guid=None, - subscription_id=None, - check_id=None, - check_status="success", - incident_status=None, - region="us-west", - http_status_code=200, - request_type="GET", - request_url="https://example.com", - request_sequence=0, - check_duration_us=150000, - request_duration_us=125000, - dns_lookup_duration_us=None, - tcp_connection_duration_us=None, - tls_handshake_duration_us=None, - 
time_to_first_byte_duration_us=None, - send_request_duration_us=None, - receive_response_duration_us=None, - request_body_size_bytes=0, - response_body_size_bytes=1024, - status_reason_type=None, - status_reason_description=None, - ) -> TraceItem: - if organization is None: - organization = self.organization - if project is None: - project = self.project - if scheduled_check_time is None: - scheduled_check_time = datetime.now(timezone.utc) - timedelta(minutes=1) - if trace_id is None: - trace_id = uuid4().hex - if guid is None: - guid = uuid4().hex - if subscription_id is None: - subscription_id = f"sub-{uuid4().hex[:8]}" - - attributes_data = { - "guid": guid, - "subscription_id": subscription_id, - "check_status": check_status, - "region": region, - "http_status_code": http_status_code, - "request_type": request_type, - "request_url": request_url, - "request_sequence": request_sequence, - "check_duration_us": check_duration_us, - "request_duration_us": request_duration_us, - "request_body_size_bytes": request_body_size_bytes, - "response_body_size_bytes": response_body_size_bytes, - } - - if check_id is not None: - attributes_data["check_id"] = check_id - - timing_fields = { - "dns_lookup_duration_us": dns_lookup_duration_us, - "tcp_connection_duration_us": tcp_connection_duration_us, - "tls_handshake_duration_us": tls_handshake_duration_us, - "time_to_first_byte_duration_us": time_to_first_byte_duration_us, - "send_request_duration_us": send_request_duration_us, - "receive_response_duration_us": receive_response_duration_us, - } - for field, value in timing_fields.items(): - if value is not None: - attributes_data[field] = value - - if status_reason_type is not None: - attributes_data["status_reason_type"] = status_reason_type - if status_reason_description is not None: - attributes_data["status_reason_description"] = status_reason_description - - if incident_status is not None: - attributes_data["incident_status"] = incident_status.value - - attributes_proto = {} - for k, v in attributes_data.items(): - attributes_proto[k] = scalar_to_any_value(v) - - timestamp_proto = Timestamp() - timestamp_proto.FromDatetime(scheduled_check_time) - - attributes_proto["scheduled_check_time_us"] = AnyValue( - int_value=int(scheduled_check_time.timestamp() * 1_000_000) - ) - attributes_proto["actual_check_time_us"] = AnyValue( - int_value=int(scheduled_check_time.timestamp() * 1_000_000) + 5000 - ) - - return TraceItem( - organization_id=organization.id, - project_id=project.id, - item_type=TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT, - timestamp=timestamp_proto, - trace_id=trace_id, - item_id=uuid4().bytes, - received=timestamp_proto, - retention_days=90, - attributes=attributes_proto, - ) - - def store_uptime_results(self, uptime_results): - files = { - f"uptime_{i}": result.SerializeToString() for i, result in enumerate(uptime_results) - } - response = requests.post( - settings.SENTRY_SNUBA + "/tests/entities/eap_items/insert_bytes", - files=files, - ) - assert response.status_code == 200 - - class OrganizationEventsUptimeResultsEndpointTest( OrganizationEventsEndpointTestBase, UptimeResultEAPTestCase ): @@ -148,13 +24,13 @@ def build_expected_result(self, **kwargs): @pytest.mark.querybuilder def test_simple_uptime_query(self): results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", http_status_code=200, region="us-east-1", scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", 
http_status_code=500, region="us-west-2", @@ -189,17 +65,17 @@ def test_simple_uptime_query(self): @pytest.mark.querybuilder def test_status_filter_query(self): results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", http_status_code=200, scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", http_status_code=500, scheduled_check_time=self.nine_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", http_status_code=201, scheduled_check_time=self.nine_mins_ago, @@ -228,7 +104,7 @@ def test_status_filter_query(self): @pytest.mark.querybuilder def test_timing_fields_query(self): results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", check_duration_us=150000, request_duration_us=125000, @@ -236,7 +112,7 @@ def test_timing_fields_query(self): tcp_connection_duration_us=15000, scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", check_duration_us=30000000, request_duration_us=30000000, @@ -286,21 +162,21 @@ def test_timing_fields_query(self): @pytest.mark.querybuilder def test_cross_level_filter_query(self): results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", http_status_code=200, dns_lookup_duration_us=15000, region="us-east-1", scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", http_status_code=504, dns_lookup_duration_us=150000, region="us-east-1", scheduled_check_time=self.nine_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", http_status_code=500, dns_lookup_duration_us=20000, @@ -339,7 +215,7 @@ def test_redirect_sequence_query(self): trace_id = uuid4().hex results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_id=check_id, request_sequence=0, check_status="success", @@ -348,7 +224,7 @@ def test_redirect_sequence_query(self): trace_id=trace_id, scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_id=check_id, request_sequence=1, check_status="success", @@ -357,7 +233,7 @@ def test_redirect_sequence_query(self): trace_id=trace_id, scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_id=uuid4().hex, request_sequence=0, check_status="success", @@ -393,25 +269,25 @@ def test_redirect_sequence_query(self): @pytest.mark.querybuilder def test_region_and_status_combination(self): results = [ - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", region="us-east-1", http_status_code=200, scheduled_check_time=self.ten_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", region="us-east-1", http_status_code=500, scheduled_check_time=self.nine_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", region="us-west-2", http_status_code=200, scheduled_check_time=self.nine_mins_ago, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="failure", region="us-west-2", http_status_code=503, @@ -446,17 +322,17 @@ def test_timestamp_precision(self): """Test that timestamp precision is maintained in queries.""" base_time = self.ten_mins_ago results = [ - self.create_uptime_result( + 
self.create_eap_uptime_result( check_status="success", guid="check-1", scheduled_check_time=base_time, ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", guid="check-2", scheduled_check_time=base_time + timedelta(microseconds=1), ), - self.create_uptime_result( + self.create_eap_uptime_result( check_status="success", guid="check-3", scheduled_check_time=base_time + timedelta(microseconds=2), From 5f21d41a52aee7fe1691a79cea8216d93f23703c Mon Sep 17 00:00:00 2001 From: Dan Fuller Date: Wed, 2 Jul 2025 12:07:08 -0700 Subject: [PATCH 2/2] mypy --- .../uptime/endpoints/project_uptime_alert_checks_index.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py b/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py index 5de6ac5d1aa2f2..b8e281b50936ee 100644 --- a/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py +++ b/src/sentry/uptime/endpoints/project_uptime_alert_checks_index.py @@ -326,7 +326,7 @@ def _transform_row( if trace_item_type == TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT: uptime_check_id = row_dict["check_id"].val_str scheduled_check_time = datetime.fromtimestamp( - row_dict.get("scheduled_check_time_us").val_int / 1_000_000 + row_dict["scheduled_check_time_us"].val_int / 1_000_000 ) duration_val = row_dict.get("check_duration_us") duration_ms = (