@@ -1,12 +1,13 @@
 from __future__ import annotations

 import itertools
-from collections.abc import Callable, Sequence
+from collections.abc import Callable, Iterable, Sequence
 from datetime import timedelta
 from typing import Any, cast
 from urllib.parse import quote as urlquote

 import sentry_sdk
+from django.contrib.auth.models import AnonymousUser
 from django.http.request import HttpRequest
 from django.utils import timezone
 from rest_framework.exceptions import ParseError, ValidationError
@@ -27,9 +28,9 @@
 from sentry.api.serializers.snuba import SnubaTSResultSerializer
 from sentry.api.utils import handle_query_errors
 from sentry.discover.arithmetic import is_equation, strip_equation
-from sentry.discover.models import DatasetSourcesTypes, DiscoverSavedQueryTypes
+from sentry.discover.models import DatasetSourcesTypes, DiscoverSavedQuery, DiscoverSavedQueryTypes
 from sentry.exceptions import InvalidSearchQuery
-from sentry.models.dashboard_widget import DashboardWidgetTypes
+from sentry.models.dashboard_widget import DashboardWidget, DashboardWidgetTypes
 from sentry.models.dashboard_widget import DatasetSourcesTypes as DashboardDatasetSourcesTypes
 from sentry.models.group import Group
 from sentry.models.organization import Organization
@@ -43,6 +44,7 @@
 from sentry.snuba.dataset import Dataset
 from sentry.snuba.metrics.extraction import MetricSpecType
 from sentry.snuba.utils import DATASET_LABELS, DATASET_OPTIONS, get_dataset
+from sentry.users.models.user import User
 from sentry.users.services.user.serial import serialize_generic_user
 from sentry.utils import snuba
 from sentry.utils.cursors import Cursor
@@ -51,7 +53,7 @@
 from sentry.utils.snuba import MAX_FIELDS, SnubaTSResult


-def get_query_columns(columns, rollup):
+def get_query_columns(columns: list[str], rollup: int) -> list[str]:
     """
     Backwards compatibility for incidents which uses the old
     column aliases as it straddles both versions of events/discover.
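The hunk cuts off before the body, so as a point of reference, here is a minimal sketch of what an alias-translation helper like this typically looks like. The alias pairs below are invented for illustration; only the shape (legacy alias mapped to current function syntax, applied per column) is implied by the docstring:

```python
# Hypothetical legacy-alias table; NOT Sentry's actual mapping.
LEGACY_ALIASES: dict[str, str] = {
    "user_count": "count_unique(user)",  # assumed legacy alias
    "event_count": "count()",            # assumed legacy alias
}

def get_query_columns(columns: list[str], rollup: int) -> list[str]:
    # rollup is accepted for interface compatibility; this sketch
    # does not vary its output by rollup.
    return [LEGACY_ALIASES.get(column, column) for column in columns]
```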
@@ -113,7 +115,7 @@ def get_teams(self, request: Request, organization: Organization) -> list[Team]:
         if not request.user:
             return []

-        teams = get_teams(request, organization)
+        teams: Iterable[Team] = get_teams(request, organization)
         if not teams:
             teams = Team.objects.get_for_user(organization, request.user)

@@ -249,7 +251,14 @@ def handle_on_demand(self, request: Request) -> tuple[bool, MetricSpecType]:

         return use_on_demand_metrics, on_demand_metric_type

-    def save_split_decision(self, widget, has_errors, has_transactions_data, organization, user):
+    def save_split_decision(
+        self,
+        widget: DashboardWidget,
+        has_errors: bool,
+        has_transactions_data: bool,
+        organization: Organization,
+        user: User | AnonymousUser,
+    ) -> int | None:
         """This can be removed once the discover dataset has been fully split"""
         source = DashboardDatasetSourcesTypes.INFERRED.value
         if has_errors and not has_transactions_data:
@@ -273,15 +282,19 @@ def save_split_decision(self, widget, has_errors, has_transactions_data, organiz
         return decision

     def save_discover_saved_query_split_decision(
-        self, query, dataset_inferred_from_query, has_errors, has_transactions_data
-    ):
+        self,
+        query: DiscoverSavedQuery,
+        dataset_inferred_from_query: int | None,
+        has_errors: bool,
+        has_transactions_data: bool,
+    ) -> int | None:
         """
         This can be removed once the discover dataset has been fully split.
         If dataset is ambiguous (i.e., could be either transactions or errors),
         default to errors.
         """
         dataset_source = DatasetSourcesTypes.INFERRED.value
-        if dataset_inferred_from_query:
+        if dataset_inferred_from_query is not None:
             decision = dataset_inferred_from_query
             sentry_sdk.set_tag("discover.split_reason", "inferred_from_query")
         elif has_errors and not has_transactions_data:
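Pulling the control flow out of the diff, here is a self-contained sketch of the decision order this method encodes: an explicit dataset inferred from the query wins, otherwise the decision falls back to which kinds of data actually came back, defaulting to errors when ambiguous (per the docstring). The integer enum values are stand-ins for the real `DiscoverSavedQueryTypes` members:

```python
# Stand-in enum values; the real ones live on DiscoverSavedQueryTypes.
ERROR_EVENTS = 1
TRANSACTION_LIKE = 2

def split_decision(
    dataset_inferred_from_query: int | None,
    has_errors: bool,
    has_transactions_data: bool,
) -> int:
    # 1. Trust a dataset inferred directly from the query, when present.
    if dataset_inferred_from_query is not None:
        return dataset_inferred_from_query
    # 2. Otherwise infer from the data that came back.
    if has_errors and not has_transactions_data:
        return ERROR_EVENTS
    if has_transactions_data and not has_errors:
        return TRANSACTION_LIKE
    # 3. Ambiguous (both kinds, or neither): default to errors.
    return ERROR_EVENTS
```

The `is not None` change in this hunk matters for exactly this flow: with a plain truthiness check, an inferred dataset whose enum value happens to be 0 would be skipped and fall through to the heuristics.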
@@ -314,7 +327,7 @@ def handle_unit_meta(
             units[key], meta[key] = self.get_unit_and_type(key, value)
         return meta, units

-    def get_unit_and_type(self, field, field_type):
+    def get_unit_and_type(self, field: str, field_type: str) -> tuple[str | None, str]:
         if field_type in SIZE_UNITS:
             return field_type, "size"
         elif field_type in DURATION_UNITS:
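The new `tuple[str | None, str]` annotation documents that the unit slot can be empty. A runnable sketch of the lookup follows; the unit tables are small stand-ins (the real `SIZE_UNITS`/`DURATION_UNITS` are larger), and the duration branch and fallback are assumptions since the hunk ends mid-function:

```python
# Stand-in unit tables; the real SIZE_UNITS / DURATION_UNITS cover
# the full set of supported units.
SIZE_UNITS = {"bit", "byte", "kibibyte", "mebibyte"}
DURATION_UNITS = {"nanosecond", "millisecond", "second", "minute"}

def get_unit_and_type(field: str, field_type: str) -> tuple[str | None, str]:
    if field_type in SIZE_UNITS:
        return field_type, "size"
    if field_type in DURATION_UNITS:
        return field_type, "duration"
    # Assumed fallback: no unit, pass the type through unchanged.
    return None, field_type
```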
@@ -427,7 +440,7 @@ def handle_data(

         return results

-    def handle_error_upsampling(self, project_ids: Sequence[int], results: dict[str, Any]):
+    def handle_error_upsampling(self, project_ids: Sequence[int], results: dict[str, Any]) -> None:
         """
         If the query is for error upsampled projects, we convert various functions under the hood.
         We need to rename these fields before returning the results to the client, to hide the conversion.
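The added `-> None` makes explicit that this helper mutates `results` in place rather than returning a new mapping. A generic, runnable sketch of that rename-before-returning pattern — the alias pair is illustrative, not the actual upsampling conversion:

```python
from typing import Any

def rename_fields_in_place(
    results: dict[str, Any], renames: dict[str, str]
) -> None:
    """Rewrite converted column names in each result row, in place."""
    for row in results.get("data", []):
        for internal_name, public_name in renames.items():
            if internal_name in row:
                row[public_name] = row.pop(internal_name)

# Illustrative usage: hide a hypothetical internal upsampled count column.
results = {"data": [{"upsampled_count()": 42}]}
rename_fields_in_place(results, {"upsampled_count()": "count()"})
assert results["data"][0] == {"count()": 42}
```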
@@ -704,7 +717,9 @@ def serialize_multiple_axis(

         return result

-    def update_meta_with_accuracy(self, meta, event_result, query_column) -> None:
+    def update_meta_with_accuracy(
+        self, meta: dict[str, Any], event_result: SnubaTSResult, query_column: str
+    ) -> None:
         if "processed_timeseries" in event_result.data:
             processed_timeseries = event_result.data["processed_timeseries"]
             meta["accuracy"] = {
@@ -724,7 +739,7 @@ def serialize_accuracy_data(
         data: Any,
         column: str,
         null_zero: bool = False,
-    ):
+    ) -> list[dict[str, Any]]:
         serialized_values = []
         for timestamp, group in itertools.groupby(data, key=lambda r: r["time"]):
             for row in group:
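The closing hunk types the return value as a list of dicts. Here is a self-contained sketch of the `itertools.groupby` pattern it uses; the output shape (`timestamp`/`value` keys) is an assumption, since the body is cut off here. Note that `groupby` only merges adjacent rows, so the input must already be ordered by `time`:

```python
import itertools
from typing import Any

def serialize_accuracy_data(
    data: list[dict[str, Any]],
    column: str,
    null_zero: bool = False,
) -> list[dict[str, Any]]:
    serialized_values: list[dict[str, Any]] = []
    # groupby only groups adjacent rows, so `data` must be sorted by "time".
    for timestamp, group in itertools.groupby(data, key=lambda r: r["time"]):
        for row in group:
            value = row.get(column)
            if value is None and null_zero:
                value = 0  # optionally coerce missing points to zero
            serialized_values.append({"timestamp": timestamp, "value": value})
    return serialized_values
```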