
Commit 962d1e6

feat(eap-spans): add perf score function (#87127)

Authored Mar 18, 2025 · 1 parent d457ede

Closes getsentry/team-visibility#37

1. Implements the `performance_score(x)` function.
2. Adds an `attribute_resolver` to `AggregateDefinition` to handle more complicated aggregates, where we don't simply aggregate on the parameter itself.
3. Some refactoring to improve readability and logic.
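For context on what the new function computes: a `performance_score(x)` query averages the web-vital *ratio* values of the matching spans (see `transform_vital_score_to_ratio` in the diff below). A minimal arithmetic sketch in plain Python, using the three sample values from the new test rather than any Sentry code path:

```python
# Sketch: performance_score(...) averages the web-vital ratio values of
# matching spans. Values mirror the spans stored in test_performance_score.
ratio_values = [0.02, 0.08, 0.08]

performance_score = sum(ratio_values) / len(ratio_values)
assert round(performance_score, 9) == 0.06  # the value the test asserts
```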

File tree

4 files changed: +119 −32 lines

src/sentry/search/eap/columns.py (+13 −4)

@@ -87,7 +87,7 @@ class ArgumentDefinition:
     # Sets the argument as an attribute, for custom functions like `http_response_rate` we might have non-attribute parameters
     is_attribute: bool = True
     # Validator to check if the value is allowed for this argument
-    validator: Callable[[Any], Any] | None = None
+    validator: Callable[[str], bool] | None = None
     # Whether this argument is completely ignored, used for `count()`
     ignored: bool = False

@@ -238,6 +238,11 @@ def resolve(
 @dataclass(kw_only=True)
 class AggregateDefinition(FunctionDefinition):
     internal_function: Function.ValueType
+    """
+    An optional function that takes in the resolved argument and returns the attribute key to aggregate on.
+    If not provided, assumes the aggregate is on the first argument.
+    """
+    attribute_resolver: Callable[[ResolvedArguments], AttributeKey] | None = None

     def resolve(
         self,
@@ -249,11 +254,15 @@ def resolve(
             raise InvalidSearchQuery(
                 f"Aggregates expects exactly 1 argument, got {len(resolved_arguments)}"
             )
-        resolved_argument = None
+
+        resolved_attribute = None
+
         if len(resolved_arguments) == 1:
             if not isinstance(resolved_arguments[0], AttributeKey):
                 raise InvalidSearchQuery("Aggregates accept attribute keys only")
-            resolved_argument = resolved_arguments[0]
+            resolved_attribute = resolved_arguments[0]
+            if self.attribute_resolver is not None:
+                resolved_attribute = self.attribute_resolver(resolved_arguments)

         return ResolvedAggregate(
             public_alias=alias,
@@ -262,7 +271,7 @@ def resolve(
             internal_type=self.internal_type,
             processor=self.processor,
             extrapolation=self.extrapolation,
-            argument=resolved_argument,
+            argument=resolved_attribute,
         )

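The `attribute_resolver` hook added above lets an aggregate redirect itself to a different column after its arguments are resolved. A toy sketch of the pattern in standalone Python, with stand-in types rather than the real `AggregateDefinition`/`AttributeKey`:

```python
from dataclasses import dataclass
from typing import Callable

@dataclass
class Key:
    """Stand-in for AttributeKey."""
    name: str

@dataclass
class ToyAggregate:
    function: str
    # Optional hook: given the resolved arguments, pick the key to aggregate on.
    resolver: Callable[[list[Key]], Key] | None = None

    def resolve(self, args: list[Key]) -> Key:
        attribute = args[0]  # default: aggregate on the first argument
        if self.resolver is not None:
            attribute = self.resolver(args)  # hook overrides the target column
        return attribute

# Usage: redirect the aggregate from the named score column to its ratio twin.
agg = ToyAggregate("avg", resolver=lambda args: Key(args[0].name.replace("score", "score.ratio")))
assert agg.resolve([Key("score.lcp")]).name == "score.ratio.lcp"
```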

src/sentry/search/eap/resolver.py (+20 −19)

@@ -730,43 +730,44 @@ def resolve_function(self, column: str, match: Match | None = None) -> tuple[
                 f"Invalid number of arguments for {function}, was expecting {len(function_definition.required_arguments)} arguments"
             )

-        for index, argument in enumerate(function_definition.arguments):
-            if argument.ignored:
+        for index, argument_definition in enumerate(function_definition.arguments):
+            if argument_definition.ignored:
                 continue
-            if argument.validator is not None:
-                if not argument.validator(arguments[index]):
-                    raise InvalidSearchQuery(
-                        f"{arguments[index]} is not a valid argument for {function}"
-                    )

             if index < len(arguments):
-                if argument.is_attribute:
-                    parsed_argument, _ = self.resolve_attribute(arguments[index])
+                argument = arguments[index]
+                if argument_definition.validator is not None:
+                    if not argument_definition.validator(argument):
+                        raise InvalidSearchQuery(
+                            f"{argument} is not a valid argument for {function}"
+                        )
+                if argument_definition.is_attribute:
+                    parsed_argument, _ = self.resolve_attribute(argument)
                 else:
-                    if argument.argument_types is None:
-                        parsed_args.append(arguments[index])  # assume it's a string
+                    if argument_definition.argument_types is None:
+                        parsed_args.append(argument)  # assume it's a string
                         continue
                     # TODO: we assume that the argument is only one type for now, and we only support string/integer
-                    for type in argument.argument_types:
+                    for type in argument_definition.argument_types:
                         if type == "integer":
-                            parsed_args.append(int(arguments[index]))
+                            parsed_args.append(int(argument))
                         else:
-                            parsed_args.append(arguments[index])
+                            parsed_args.append(argument)
                     continue

-            elif argument.default_arg:
-                parsed_argument, _ = self.resolve_attribute(argument.default_arg)
+            elif argument_definition.default_arg:
+                parsed_argument, _ = self.resolve_attribute(argument_definition.default_arg)
             else:
                 raise InvalidSearchQuery(
                     f"Invalid number of arguments for {function}, was expecting {len(function_definition.required_arguments)} arguments"
                 )

             if (
-                argument.argument_types is not None
-                and parsed_argument.search_type not in argument.argument_types
+                argument_definition.argument_types is not None
+                and parsed_argument.search_type not in argument_definition.argument_types
             ):
                 raise InvalidSearchQuery(
-                    f"{parsed_argument.public_alias} is invalid for parameter {index+1} in {function}. Its a {parsed_argument.search_type} type field, but it must be one of these types: {argument.argument_types}"
+                    f"{parsed_argument.public_alias} is invalid for parameter {index+1} in {function}. Its a {parsed_argument.search_type} type field, but it must be one of these types: {argument_definition.argument_types}"
                 )
             parsed_args.append(parsed_argument)
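Two things change in this loop: the iteration variable is renamed so the `ArgumentDefinition` is no longer confused with the raw argument string, and the validator call moves inside the `index < len(arguments)` branch, so only explicitly supplied arguments are validated while defaults bypass the check. A self-contained sketch of that control flow (toy types and a hypothetical `span.duration` default, not the real resolver):

```python
from dataclasses import dataclass
from typing import Callable

@dataclass
class ArgDef:  # stand-in for ArgumentDefinition
    validator: Callable[[str], bool] | None = None
    default_arg: str | None = None

def resolve_args(definitions: list[ArgDef], arguments: list[str]) -> list[str]:
    parsed: list[str] = []
    for index, definition in enumerate(definitions):
        if index < len(arguments):
            argument = arguments[index]
            # Validation only applies to user-supplied arguments.
            if definition.validator is not None and not definition.validator(argument):
                raise ValueError(f"{argument} is not a valid argument")
            parsed.append(argument)
        elif definition.default_arg is not None:
            parsed.append(definition.default_arg)  # defaults skip the validator
        else:
            raise ValueError("invalid number of arguments")
    return parsed

defs = [ArgDef(validator=lambda a: a.startswith("measurements."), default_arg="span.duration")]
assert resolve_args(defs, ["measurements.score.lcp"]) == ["measurements.score.lcp"]
assert resolve_args(defs, []) == ["span.duration"]  # default accepted unvalidated
```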

src/sentry/search/eap/spans/aggregates.py (+38 −9)

@@ -66,14 +66,21 @@ def resolve_key_eq_value_filter(args: ResolvedArguments) -> tuple[AttributeKey,

 # TODO: We should eventually update the frontend to query the ratio column directly
 def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceItemFilter]:
-    score_column = cast(str, args[0])
-    ratio_column_name = score_column.replace("measurements.score", "score.ratio")
-    if ratio_column_name == "score.ratio.total":
-        ratio_column_name = "score.total"
-    attribute_key = AttributeKey(name=ratio_column_name, type=AttributeKey.TYPE_DOUBLE)
-    filter = TraceItemFilter(exists_filter=ExistsFilter(key=attribute_key))
+    score_attribute = cast(AttributeKey, args[0])
+    ratio_attribute = transform_vital_score_to_ratio([score_attribute])
+    filter = TraceItemFilter(exists_filter=ExistsFilter(key=ratio_attribute))

-    return (attribute_key, filter)
+    return (ratio_attribute, filter)
+
+
+def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
+    score_attribute = cast(AttributeKey, args[0])
+    score_name = score_attribute.name
+
+    ratio_score_name = score_name.replace("score", "score.ratio")
+    if ratio_score_name == "score.ratio.total":
+        ratio_score_name = "score.total"
+    return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE)


 SPAN_CONDITIONAL_AGGREGATE_DEFINITIONS = {
@@ -113,9 +120,14 @@ def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceIt
         default_search_type="integer",
         arguments=[
             ArgumentDefinition(
-                argument_types={"string"},
+                argument_types={
+                    "duration",
+                    "number",
+                    "percentage",
+                    *constants.SIZE_TYPE,
+                    *constants.DURATION_TYPE,
+                },
                 validator=literal_validator(WEB_VITALS_MEASUREMENTS),
-                is_attribute=False,
             )
         ],
         aggregate_resolver=resolve_count_scores,
@@ -355,4 +367,21 @@ def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceIt
             )
         ],
     ),
+    "performance_score": AggregateDefinition(
+        internal_function=Function.FUNCTION_AVG,
+        default_search_type="integer",
+        arguments=[
+            ArgumentDefinition(
+                argument_types={
+                    "duration",
+                    "number",
+                    "percentage",
+                    *constants.SIZE_TYPE,
+                    *constants.DURATION_TYPE,
+                },
+                validator=literal_validator(WEB_VITALS_MEASUREMENTS),
+            ),
+        ],
+        attribute_resolver=transform_vital_score_to_ratio,
+    ),
 }
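The name transform above is the crux of the new function: a web-vital score attribute is mapped onto its ratio twin, with the total score special-cased because it has no separate ratio column. A quick standalone check of that string logic (plain Python; the attribute names here assume the internal names already have the `measurements.` prefix stripped):

```python
def to_ratio_name(score_name: str) -> str:
    # Mirrors the string logic of transform_vital_score_to_ratio.
    ratio = score_name.replace("score", "score.ratio")
    return "score.total" if ratio == "score.ratio.total" else ratio

assert to_ratio_name("score.lcp") == "score.ratio.lcp"
assert to_ratio_name("score.total") == "score.total"  # special case: no ratio twin
```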

tests/snuba/api/endpoints/test_organization_events_span_indexed.py (+48)

@@ -2981,6 +2981,54 @@ def test_count_scores(self):
         assert data[0]["count_scores(measurements.score.total)"] == 3
         assert meta["dataset"] == self.dataset

+    def test_performance_score(self):
+        self.store_spans(
+            [
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 0.02},
+                        }
+                    },
+                    start_ts=self.ten_mins_ago,
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 0.08},
+                        }
+                    },
+                    start_ts=self.ten_mins_ago,
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 0.08},
+                        }
+                    },
+                    start_ts=self.ten_mins_ago,
+                ),
+            ],
+            is_eap=self.is_eap,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "performance_score(measurements.score.lcp)",
+                ],
+                "project": self.project.id,
+                "dataset": self.dataset,
+            }
+        )
+
+        assert response.status_code == 200, response.content
+        data = response.data["data"]
+        meta = response.data["meta"]
+        assert len(data) == 1
+        assert data[0]["performance_score(measurements.score.lcp)"] == 0.06
+        assert meta["dataset"] == self.dataset
+
     @pytest.mark.skip(reason="replay id alias not migrated over")
     def test_replay_id(self):
         super().test_replay_id()
