diff --git a/src/sentry/features/temporary.py b/src/sentry/features/temporary.py
index 220d2c98802d53..900726847ac5b3 100644
--- a/src/sentry/features/temporary.py
+++ b/src/sentry/features/temporary.py
@@ -453,6 +453,8 @@ def register_temporary_features(manager: FeatureManager):
     manager.add("organizations:uptime-detector-create-issues", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
     # Enable sending uptime results to EAP (Events Analytics Platform)
     manager.add("organizations:uptime-eap-results", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
+    # Enable querying uptime data from EAP uptime_results instead of uptime_checks
+    manager.add("organizations:uptime-eap-uptime-results-query", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
     manager.add("organizations:use-metrics-layer", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=False)
     manager.add("organizations:user-feedback-ai-summaries", OrganizationFeature, FeatureHandlerStrategy.FLAGPOLE, api_expose=True)
     # Enable auto spam classification at User Feedback ingest time
diff --git a/src/sentry/uptime/endpoints/organization_uptime_stats.py b/src/sentry/uptime/endpoints/organization_uptime_stats.py
index 4d438d255b6bd0..2b6f6f5d566b3e 100644
--- a/src/sentry/uptime/endpoints/organization_uptime_stats.py
+++ b/src/sentry/uptime/endpoints/organization_uptime_stats.py
@@ -16,9 +16,13 @@
     Function,
     StrArray,
 )
-from sentry_protos.snuba.v1.trace_item_filter_pb2 import ComparisonFilter, TraceItemFilter
+from sentry_protos.snuba.v1.trace_item_filter_pb2 import (
+    AndFilter,
+    ComparisonFilter,
+    TraceItemFilter,
+)

-from sentry import options
+from sentry import features, options
 from sentry.api.api_owners import ApiOwner
 from sentry.api.api_publish_status import ApiPublishStatus
 from sentry.api.base import StatsArgsDict, StatsMixin, region_silo_endpoint
@@ -74,15 +78,35 @@ def get(self, request: Request, organization: Organization) -> Response:
         )

         try:
-            eap_response = self._make_eap_request(
-                organization, projects, subscription_ids, timerange_args, epoch_cutoff
-            )
+            if features.has("organizations:uptime-eap-uptime-results-query", organization):
+                eap_response = self._make_eap_request(
+                    organization,
+                    projects,
+                    subscription_ids,
+                    timerange_args,
+                    epoch_cutoff,
+                    TraceItemType.TRACE_ITEM_TYPE_UPTIME_RESULT,
+                    "guid",
+                    "subscription_id",
+                    include_request_sequence_filter=True,
+                )
+                formatted_response = self._format_response(eap_response, "subscription_id")
+            else:
+                eap_response = self._make_eap_request(
+                    organization,
+                    projects,
+                    subscription_ids,
+                    timerange_args,
+                    epoch_cutoff,
+                    TraceItemType.TRACE_ITEM_TYPE_UPTIME_CHECK,
+                    "uptime_check_id",
+                    "uptime_subscription_id",
+                )
+                formatted_response = self._format_response(eap_response, "uptime_subscription_id")
         except Exception:
             logger.exception("Error making EAP RPC request for uptime check stats")
             return self.respond("error making request", status=400)

-        formatted_response = self._format_response(eap_response, epoch_cutoff)
-
         # Map the response back to project uptime subscription ids
         mapped_response = self._map_response_to_project_uptime_subscription_ids(
             subscription_id_to_project_uptime_subscription_id, formatted_response
@@ -127,7 +151,7 @@ def _authorize_and_map_project_uptime_subscription_ids(
         }

         validated_subscription_ids = [
-            project_uptime_subscription[1]
+            str(uuid.UUID(project_uptime_subscription[1]))
             for project_uptime_subscription in project_uptime_subscriptions
             if project_uptime_subscription[1] is not None
         ]
@@ -141,6 +165,10 @@ def _make_eap_request(
         subscription_ids: list[str],
         timerange_args: StatsArgsDict,
         epoch_cutoff: datetime.datetime | None,
+        trace_item_type: TraceItemType.ValueType,
+        aggregation_key: str,
+        subscription_key: str,
+        include_request_sequence_filter: bool = False,
     ) -> TimeSeriesResponse:
         eap_query_start = timerange_args["start"]

@@ -151,11 +179,40 @@ def _make_eap_request(
         start_timestamp.FromDatetime(eap_query_start)
         end_timestamp = Timestamp()
         end_timestamp.FromDatetime(timerange_args["end"])
+
+        subscription_filter = TraceItemFilter(
+            comparison_filter=ComparisonFilter(
+                key=AttributeKey(
+                    name=subscription_key,
+                    type=AttributeKey.Type.TYPE_STRING,
+                ),
+                op=ComparisonFilter.OP_IN,
+                value=AttributeValue(val_str_array=StrArray(values=subscription_ids)),
+            )
+        )
+
+        if include_request_sequence_filter:
+            request_sequence_filter = TraceItemFilter(
+                comparison_filter=ComparisonFilter(
+                    key=AttributeKey(
+                        name="request_sequence",
+                        type=AttributeKey.Type.TYPE_INT,
+                    ),
+                    op=ComparisonFilter.OP_EQUALS,
+                    value=AttributeValue(val_int=0),
+                )
+            )
+            query_filter = TraceItemFilter(
+                and_filter=AndFilter(filters=[subscription_filter, request_sequence_filter])
+            )
+        else:
+            query_filter = subscription_filter
+
         request = TimeSeriesRequest(
             meta=RequestMeta(
                 organization_id=organization.id,
                 project_ids=[project.id for project in projects],
-                trace_item_type=TraceItemType.TRACE_ITEM_TYPE_UPTIME_CHECK,
+                trace_item_type=trace_item_type,
                 start_timestamp=start_timestamp,
                 end_timestamp=end_timestamp,
             ),
@@ -163,7 +220,7 @@ def _make_eap_request(
                 AttributeAggregation(
                     aggregate=Function.FUNCTION_COUNT,
                     key=AttributeKey(
-                        name="uptime_check_id",
+                        name=aggregation_key,
                         type=AttributeKey.Type.TYPE_STRING,
                     ),
                     label="count()",
@@ -171,7 +228,7 @@ def _make_eap_request(
             ],
             group_by=[
                 AttributeKey(
-                    name="uptime_subscription_id",
+                    name=subscription_key,
                     type=AttributeKey.Type.TYPE_STRING,
                 ),
                 AttributeKey(
@@ -184,32 +241,28 @@ def _make_eap_request(
                 ),
             ],
             granularity_secs=timerange_args["rollup"],
-            filter=TraceItemFilter(
-                comparison_filter=ComparisonFilter(
-                    key=AttributeKey(
-                        name="uptime_subscription_id",
-                        type=AttributeKey.Type.TYPE_STRING,
-                    ),
-                    op=ComparisonFilter.OP_IN,
-                    value=AttributeValue(val_str_array=StrArray(values=subscription_ids)),
-                )
-            ),
+            filter=query_filter,
         )
         responses = timeseries_rpc([request])
         assert len(responses) == 1
         return responses[0]

     def _format_response(
-        self, response: TimeSeriesResponse, epoch_cutoff: datetime.datetime | None = None
+        self, response: TimeSeriesResponse, subscription_key: str
     ) -> dict[str, list[tuple[int, dict[str, int]]]]:
         """
         Formats the response from the EAP RPC request into a dictionary of subscription ids
         to a list of tuples of timestamps and a dictionary of check statuses to counts.
+
+        Args:
+            response: The EAP RPC TimeSeriesResponse
+            subscription_key: The attribute name for subscription ID ("uptime_subscription_id" or "subscription_id")
         """
         formatted_data: dict[str, dict[int, dict[str, int]]] = {}
         for timeseries in response.result_timeseries:
-            subscription_id = timeseries.group_by_attributes["uptime_subscription_id"]
+            subscription_id = timeseries.group_by_attributes[subscription_key]
             status = timeseries.group_by_attributes["check_status"]
             incident_status = timeseries.group_by_attributes["incident_status"]
diff --git a/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py b/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py
index 4c0549bd540394..21f298ee847d78 100644
--- a/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py
+++ b/tests/sentry/uptime/endpoints/test_organization_uptime_stats.py
@@ -1,24 +1,23 @@
 import uuid
 from datetime import datetime, timedelta, timezone

-from sentry.testutils.cases import UptimeCheckSnubaTestCase
+from sentry.testutils.cases import APITestCase, UptimeCheckSnubaTestCase
 from sentry.testutils.helpers.datetime import freeze_time
 from sentry.testutils.helpers.options import override_options
 from sentry.uptime.endpoints.organization_uptime_stats import add_extra_buckets_for_epoch_cutoff
 from sentry.uptime.types import IncidentStatus
 from sentry.utils import json
-from tests.sentry.uptime.endpoints.test_organization_uptime_alert_index import (
-    OrganizationUptimeAlertIndexBaseEndpointTest,
+from tests.snuba.api.endpoints.test_organization_events_uptime_results import (
+    UptimeResultEAPTestCase,
 )

 MOCK_DATETIME = datetime.now(tz=timezone.utc) - timedelta(days=1)


-@freeze_time(MOCK_DATETIME)
-class OrganizationUptimeCheckIndexEndpointTest(
-    OrganizationUptimeAlertIndexBaseEndpointTest, UptimeCheckSnubaTestCase
-):
+class OrganizationUptimeStatsBaseTest(APITestCase):
+    __test__ = False
     endpoint = "sentry-api-0-organization-uptime-stats"
+    features: dict[str, bool] = {}

     def setUp(self):
         super().setUp()
@@ -30,46 +29,57 @@ def setUp(self):
         self.project_uptime_subscription = self.create_project_uptime_subscription(
             uptime_subscription=self.subscription
         )
+        scenarios: list[dict] = [
+            {"check_status": "success"},
+            {"check_status": "failure"},
+            {"check_status": "success"},
+            {"check_status": "failure"},
+            {"check_status": "success"},
+            {"check_status": "failure"},
+            {"check_status": "failure"},
+            {"check_status": "failure", "incident_status": IncidentStatus.IN_INCIDENT},
+        ]

-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="failure")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="failure")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="success")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="failure")
-        self.store_snuba_uptime_check(subscription_id=self.subscription_id, check_status="failure")
-        self.store_snuba_uptime_check(
-            subscription_id=self.subscription_id,
-            check_status="failure",
-            incident_status=IncidentStatus.IN_INCIDENT,
-        )
+        for scenario in scenarios:
+            self.store_uptime_data(self.subscription_id, **scenario)
+
+    def store_uptime_data(
+        self,
+        subscription_id,
+        check_status,
+        incident_status=IncidentStatus.NO_INCIDENT,
+        scheduled_check_time=None,
+    ):
+        """Store a single uptime data row. Must be implemented by subclasses."""
+        raise NotImplementedError("Subclasses must implement store_uptime_data")

     def test_simple(self):
         """Test that the endpoint returns data for a simple uptime check."""
-        response = self.get_success_response(
-            self.organization.slug,
-            project=[self.project.id],
-            projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
-            since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(),
-            until=datetime.now(timezone.utc).timestamp(),
-            resolution="1d",
-        )
-        assert response.data is not None
-        data = json.loads(json.dumps(response.data))
-        assert len(data[str(self.project_uptime_subscription.id)]) == 7
-        assert data[str(self.project_uptime_subscription.id)][-1][1] == {
-            "failure": 4,
-            "failure_incident": 1,
-            "success": 3,
-            "missed_window": 0,
-        }
-        assert data[str(self.project_uptime_subscription.id)][0][1] == {
-            "failure": 0,
-            "failure_incident": 0,
-            "success": 0,
-            "missed_window": 0,
-        }
+        with self.feature(self.features):
+            response = self.get_success_response(
+                self.organization.slug,
+                project=[self.project.id],
+                projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
+                since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(),
+                until=datetime.now(timezone.utc).timestamp(),
+                resolution="1d",
+            )
+            assert response.data is not None
+            data = json.loads(json.dumps(response.data))
+            assert len(data[str(self.project_uptime_subscription.id)]) == 7
+            assert data[str(self.project_uptime_subscription.id)][-1][1] == {
+                "failure": 4,
+                "failure_incident": 1,
+                "success": 3,
+                "missed_window": 0,
+            }
+            assert data[str(self.project_uptime_subscription.id)][0][1] == {
+                "failure": 0,
+                "failure_incident": 0,
+                "success": 0,
+                "missed_window": 0,
+            }

     @override_options(
         {"uptime.date_cutoff_epoch_seconds": (MOCK_DATETIME - timedelta(days=1)).timestamp()}
@@ -77,17 +87,18 @@ def test_simple(self):
     )
     def test_simple_with_date_cutoff(self):
         """Test that the endpoint returns data for a simple uptime check."""
-        response = self.get_success_response(
-            self.organization.slug,
-            project=[self.project.id],
-            projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
-            since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(),
-            until=datetime.now(timezone.utc).timestamp(),
-            resolution="1d",
-        )
-        assert response.data is not None
-        data = json.loads(json.dumps(response.data))
-        assert len(data[str(self.project_uptime_subscription.id)]) == 90
+        with self.feature(self.features):
+            response = self.get_success_response(
+                self.organization.slug,
+                project=[self.project.id],
+                projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
+                since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(),
+                until=datetime.now(timezone.utc).timestamp(),
+                resolution="1d",
+            )
+            assert response.data is not None
+            data = json.loads(json.dumps(response.data))
+            assert len(data[str(self.project_uptime_subscription.id)]) == 90

     @override_options(
         {"uptime.date_cutoff_epoch_seconds": (MOCK_DATETIME - timedelta(days=1)).timestamp()}
@@ -95,17 +106,18 @@ def test_simple_with_date_cutoff(self):
     )
     def test_simple_with_date_cutoff_rounded_resolution(self):
         """Test that the endpoint returns data for a simple uptime check."""
-        response = self.get_success_response(
-            self.organization.slug,
-            project=[self.project.id],
-            projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
-            since=(datetime.now(timezone.utc) - timedelta(days=89, hours=1)).timestamp(),
-            until=datetime.now(timezone.utc).timestamp(),
-            resolution="1d",
-        )
-        assert response.data is not None
-        data = json.loads(json.dumps(response.data))
-        assert len(data[str(self.project_uptime_subscription.id)]) == 89
+        with self.feature(self.features):
+            response = self.get_success_response(
+                self.organization.slug,
+                project=[self.project.id],
+                projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)],
+                since=(datetime.now(timezone.utc) - timedelta(days=89, hours=1)).timestamp(),
+                until=datetime.now(timezone.utc).timestamp(),
+                resolution="1d",
+            )
+            assert response.data is not None
+            data = json.loads(json.dumps(response.data))
+            assert len(data[str(self.project_uptime_subscription.id)]) == 89

     @override_options(
         {"uptime.date_cutoff_epoch_seconds": (MOCK_DATETIME - timedelta(days=1)).timestamp()}
@@ -120,30 +132,26 @@ def test_simple_with_date_cutoff_rounded_resolution_past_cutoff(self):
             uptime_subscription=subscription
         )

-        self.store_snuba_uptime_check(
-            subscription_id=subscription_id,
-            check_status="success",
-            scheduled_check_time=(MOCK_DATETIME - timedelta(days=5)),
+        # Store data for the cutoff test scenario
+        self.store_uptime_data(
+            subscription_id, "success", scheduled_check_time=(MOCK_DATETIME - timedelta(days=5))
         )
-        self.store_snuba_uptime_check(
-            subscription_id=subscription_id,
-            check_status="failure",
-            scheduled_check_time=MOCK_DATETIME - timedelta(days=5),
+        self.store_uptime_data(
+            subscription_id, "failure", scheduled_check_time=MOCK_DATETIME - timedelta(days=5)
         )
-        self.store_snuba_uptime_check(
-            subscription_id=subscription_id,
-            check_status="failure",
-            scheduled_check_time=MOCK_DATETIME - timedelta(hours=2),
+        self.store_uptime_data(
+            subscription_id, "failure", scheduled_check_time=MOCK_DATETIME - timedelta(hours=2)
         )

-        response = self.get_success_response(
-            self.organization.slug,
-            project=[self.project.id],
-            projectUptimeSubscriptionId=[str(project_uptime_subscription.id)],
-            since=(datetime.now(timezone.utc) - timedelta(days=89, hours=1)).timestamp(),
-            until=datetime.now(timezone.utc).timestamp(),
-            resolution="1d",
-        )
+        with self.feature(self.features):
+            response = self.get_success_response(
+                self.organization.slug,
+                project=[self.project.id],
+                projectUptimeSubscriptionId=[str(project_uptime_subscription.id)],
+                since=(datetime.now(timezone.utc) - timedelta(days=89, hours=1)).timestamp(),
+                until=datetime.now(timezone.utc).timestamp(),
+                resolution="1d",
+            )
         assert response.data is not None
         data = json.loads(json.dumps(response.data))
         # check that we return all the intervals,
@@ -168,65 +176,91 @@ def test_invalid_uptime_subscription_id(self):
         """
         Test that an invalid uptime_subscription_id produces a 400 response.
""" - response = self.get_response( - self.organization.slug, - project=[self.project.id], - projectUptimeSubscriptionId=[str(uuid.uuid4())], - since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), - until=datetime.now(timezone.utc).timestamp(), - resolution="1d", - ) - assert response.status_code == 400 - assert response.json() == "Invalid project uptime subscription ids provided" + with self.feature(self.features): + response = self.get_response( + self.organization.slug, + project=[self.project.id], + projectUptimeSubscriptionId=[str(uuid.uuid4())], + since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), + until=datetime.now(timezone.utc).timestamp(), + resolution="1d", + ) + assert response.status_code == 400 + assert response.json() == "Invalid project uptime subscription ids provided" def test_no_uptime_subscription_id(self): """ Test that not sending any uptime_subscription_id produces a 400 response. """ - response = self.get_response( - self.organization.slug, - project=[self.project.id], - projectUptimeSubscriptionId=[], - since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), - until=datetime.now(timezone.utc).timestamp(), - resolution="1d", - ) - assert response.status_code == 400 - assert response.json() == "No project uptime subscription ids provided" + with self.feature(self.features): + response = self.get_response( + self.organization.slug, + project=[self.project.id], + projectUptimeSubscriptionId=[], + since=(datetime.now(timezone.utc) - timedelta(days=7)).timestamp(), + until=datetime.now(timezone.utc).timestamp(), + resolution="1d", + ) + assert response.status_code == 400 + assert response.json() == "No project uptime subscription ids provided" def test_too_many_periods(self): """ Test that requesting a high resolution across a large period of time produces a 400 response. """ - response = self.get_response( - self.organization.slug, - project=[self.project.id], - projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)], - since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), - until=datetime.now(timezone.utc).timestamp(), - resolution="1m", - ) - assert response.status_code == 400 - assert response.json() == "error making request" + with self.feature(self.features): + response = self.get_response( + self.organization.slug, + project=[self.project.id], + projectUptimeSubscriptionId=[str(self.project_uptime_subscription.id)], + since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), + until=datetime.now(timezone.utc).timestamp(), + resolution="1m", + ) + assert response.status_code == 400 + assert response.json() == "error making request" def test_too_many_uptime_subscription_ids(self): """ - Test that sending a large nubmer of subscription IDs produces a 400 + Test that sending a large number of subscription IDs produces a 400 """ - response = self.get_response( - self.organization.slug, - project=[self.project.id], - projectUptimeSubscriptionId=[str(uuid.uuid4()) for _ in range(101)], - since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), - until=datetime.now(timezone.utc).timestamp(), - resolution="1h", - ) - assert response.status_code == 400 - assert ( - response.json() == "Too many project uptime subscription ids provided. 
Maximum is 100" + with self.feature(self.features): + response = self.get_response( + self.organization.slug, + project=[self.project.id], + projectUptimeSubscriptionId=[str(uuid.uuid4()) for _ in range(101)], + since=(datetime.now(timezone.utc) - timedelta(days=90)).timestamp(), + until=datetime.now(timezone.utc).timestamp(), + resolution="1h", + ) + assert response.status_code == 400 + assert ( + response.json() + == "Too many project uptime subscription ids provided. Maximum is 100" + ) + + +@freeze_time(MOCK_DATETIME) +class OrganizationUptimeCheckIndexEndpointTest( + OrganizationUptimeStatsBaseTest, UptimeCheckSnubaTestCase +): + __test__ = True + + def store_uptime_data( + self, + subscription_id, + check_status, + incident_status=IncidentStatus.NO_INCIDENT, + scheduled_check_time=None, + ): + self.store_snuba_uptime_check( + subscription_id=subscription_id, + check_status=check_status, + incident_status=incident_status, + scheduled_check_time=scheduled_check_time, ) @@ -275,3 +309,34 @@ def test_add_extra_buckets_for_epoch_cutoff(): # Test with no epoch cutoff - should return original result = add_extra_buckets_for_epoch_cutoff(formatted_response, None, rollup, start, end) assert result == formatted_response + + +@freeze_time(MOCK_DATETIME) +class OrganizationUptimeStatsEndpointWithEAPTests( + OrganizationUptimeStatsBaseTest, UptimeResultEAPTestCase +): + __test__ = True + + def setUp(self): + super().setUp() + self.features = { + "organizations:uptime-eap-enabled": True, + "organizations:uptime-eap-uptime-results-query": True, + } + + def store_uptime_data( + self, + subscription_id, + check_status, + incident_status=IncidentStatus.NO_INCIDENT, + scheduled_check_time=None, + ): + uptime_result = self.create_uptime_result( + subscription_id=str(uuid.UUID(subscription_id)), + guid=str(uuid.UUID(subscription_id)), + request_url="https://santry.io", + check_status=check_status, + incident_status=incident_status, + scheduled_check_time=scheduled_check_time, + ) + self.store_uptime_results([uptime_result]) diff --git a/tests/snuba/api/endpoints/test_organization_events_uptime_results.py b/tests/snuba/api/endpoints/test_organization_events_uptime_results.py index 4cb3fee9f054c3..ab8f83727f7002 100644 --- a/tests/snuba/api/endpoints/test_organization_events_uptime_results.py +++ b/tests/snuba/api/endpoints/test_organization_events_uptime_results.py @@ -18,12 +18,13 @@ def create_uptime_result( *, organization=None, project=None, - timestamp=None, + scheduled_check_time=None, trace_id=None, guid=None, subscription_id=None, check_id=None, check_status="success", + incident_status=None, region="us-west", http_status_code=200, request_type="GET", @@ -41,14 +42,13 @@ def create_uptime_result( response_body_size_bytes=1024, status_reason_type=None, status_reason_description=None, - **extra_attributes, ) -> TraceItem: if organization is None: organization = self.organization if project is None: project = self.project - if timestamp is None: - timestamp = datetime.now(timezone.utc) - timedelta(minutes=1) + if scheduled_check_time is None: + scheduled_check_time = datetime.now(timezone.utc) - timedelta(minutes=1) if trace_id is None: trace_id = uuid4().hex if guid is None: @@ -91,20 +91,21 @@ def create_uptime_result( if status_reason_description is not None: attributes_data["status_reason_description"] = status_reason_description - attributes_data.update(extra_attributes) + if incident_status is not None: + attributes_data["incident_status"] = incident_status.value attributes_proto = {} for k, v 
             attributes_proto[k] = scalar_to_any_value(v)

         timestamp_proto = Timestamp()
-        timestamp_proto.FromDatetime(timestamp)
+        timestamp_proto.FromDatetime(scheduled_check_time)

         attributes_proto["scheduled_check_time_us"] = AnyValue(
-            int_value=int(timestamp.timestamp() * 1_000_000)
+            int_value=int(scheduled_check_time.timestamp() * 1_000_000)
         )
         attributes_proto["actual_check_time_us"] = AnyValue(
-            int_value=int(timestamp.timestamp() * 1_000_000) + 5000
+            int_value=int(scheduled_check_time.timestamp() * 1_000_000) + 5000
         )

         return TraceItem(
@@ -151,13 +152,13 @@ def test_simple_uptime_query(self):
                 check_status="success",
                 http_status_code=200,
                 region="us-east-1",
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="failure",
                 http_status_code=500,
                 region="us-west-2",
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
         ]
         self.store_uptime_results(results)
@@ -191,17 +192,17 @@ def test_status_filter_query(self):
             self.create_uptime_result(
                 check_status="success",
                 http_status_code=200,
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="failure",
                 http_status_code=500,
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="success",
                 http_status_code=201,
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
         ]
         self.store_uptime_results(results)
@@ -233,7 +234,7 @@ def test_timing_fields_query(self):
                 request_duration_us=125000,
                 dns_lookup_duration_us=25000,
                 tcp_connection_duration_us=15000,
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="failure",
@@ -241,7 +242,7 @@ def test_timing_fields_query(self):
                 request_duration_us=30000000,
                 dns_lookup_duration_us=200000,
                 tcp_connection_duration_us=25000,
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
         ]
         self.store_uptime_results(results)
@@ -290,21 +291,21 @@ def test_cross_level_filter_query(self):
                 http_status_code=200,
                 dns_lookup_duration_us=15000,
                 region="us-east-1",
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="failure",
                 http_status_code=504,
                 dns_lookup_duration_us=150000,
                 region="us-east-1",
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
             self.create_uptime_result(
                 check_status="failure",
                 http_status_code=500,
                 dns_lookup_duration_us=20000,
                 region="us-west-2",
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
         ]
         self.store_uptime_results(results)
@@ -345,7 +346,7 @@ def test_redirect_sequence_query(self):
                 http_status_code=301,
                 request_url="http://example.com",
                 trace_id=trace_id,
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_id=check_id,
@@ -354,7 +355,7 @@ def test_redirect_sequence_query(self):
                 http_status_code=200,
                 request_url="https://example.com",
                 trace_id=trace_id,
-                timestamp=self.ten_mins_ago,
+                scheduled_check_time=self.ten_mins_ago,
             ),
             self.create_uptime_result(
                 check_id=uuid4().hex,
@@ -362,7 +363,7 @@ def test_redirect_sequence_query(self):
                 check_status="success",
                 http_status_code=200,
                 request_url="https://other.com",
-                timestamp=self.nine_mins_ago,
+                scheduled_check_time=self.nine_mins_ago,
             ),
         ]
         self.store_uptime_results(results)
@@ -396,25 +397,25 @@ def test_region_and_status_combination(self):
check_status="success", region="us-east-1", http_status_code=200, - timestamp=self.ten_mins_ago, + scheduled_check_time=self.ten_mins_ago, ), self.create_uptime_result( check_status="failure", region="us-east-1", http_status_code=500, - timestamp=self.nine_mins_ago, + scheduled_check_time=self.nine_mins_ago, ), self.create_uptime_result( check_status="success", region="us-west-2", http_status_code=200, - timestamp=self.nine_mins_ago, + scheduled_check_time=self.nine_mins_ago, ), self.create_uptime_result( check_status="failure", region="us-west-2", http_status_code=503, - timestamp=self.nine_mins_ago, + scheduled_check_time=self.nine_mins_ago, ), ] self.store_uptime_results(results) @@ -448,17 +449,17 @@ def test_timestamp_precision(self): self.create_uptime_result( check_status="success", guid="check-1", - timestamp=base_time, + scheduled_check_time=base_time, ), self.create_uptime_result( check_status="success", guid="check-2", - timestamp=base_time + timedelta(microseconds=1), + scheduled_check_time=base_time + timedelta(microseconds=1), ), self.create_uptime_result( check_status="success", guid="check-3", - timestamp=base_time + timedelta(microseconds=2), + scheduled_check_time=base_time + timedelta(microseconds=2), ), ] self.store_uptime_results(results)