Commit 7e645c7

feat(eap-spans): implement opportunity_score function (mostly) (#87313)

Work for getsentry/team-visibility#41

1. Mostly implements the opportunity_score function; we still need to multiply the formula by the weight, but we can't until getsentry/eap-planning#202 is complete.
2. Also moves `transform_vital_score_to_ratio` to a shared `utils` file, as it is now needed by both `formulas.py` and `aggregates.py`.

1 parent be5c268 · commit 7e645c7
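For reference, the new `opportunity_score` formula resolves to a conditional COUNT minus a conditional SUM over the vital's `score.ratio.*` attribute. A minimal pure-Python sketch of the quantity it computes; the helper name and the `weight` parameter are illustrative, not part of this commit:

```python
# Sketch only: mirrors the math of the new EAP formula. For a given web vital,
# the opportunity score here is count(ratio) - sum(ratio), i.e. the score
# "left on the table" across spans that report that ratio attribute.
def opportunity_score_reference(ratios: list[float], weight: float = 1.0) -> float:
    unweighted = len(ratios) - sum(ratios)  # == sum(1 - r for r in ratios)
    # Once getsentry/eap-planning#202 is resolved, the EAP formula should also
    # multiply by the vital's weight; until then it returns the unweighted value.
    return weight * unweighted
```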

File tree

4 files changed: +127 −21 lines changed

src/sentry/search/eap/spans/aggregates.py

+1 −19

```diff
@@ -14,17 +14,9 @@
     ConditionalAggregateDefinition,
     ResolvedArguments,
 )
+from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio
 from sentry.search.eap.utils import literal_validator
 
-WEB_VITALS_MEASUREMENTS = [
-    "measurements.score.total",
-    "measurements.score.lcp",
-    "measurements.score.fcp",
-    "measurements.score.cls",
-    "measurements.score.ttfb",
-    "measurements.score.inp",
-]
-
 
 def count_processor(count_value: int | None) -> int:
     if count_value is None:
@@ -73,16 +65,6 @@ def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceIt
     return (ratio_attribute, filter)
 
 
-def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
-    score_attribute = cast(AttributeKey, args[0])
-    score_name = score_attribute.name
-
-    ratio_score_name = score_name.replace("score", "score.ratio")
-    if ratio_score_name == "score.ratio.total":
-        ratio_score_name = "score.total"
-    return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE)
-
-
 SPAN_CONDITIONAL_AGGREGATE_DEFINITIONS = {
     "count_op": ConditionalAggregateDefinition(
         internal_function=Function.FUNCTION_COUNT,
```

src/sentry/search/eap/spans/formulas.py

+48 −1

```diff
@@ -12,10 +12,15 @@
     Function,
     StrArray,
 )
-from sentry_protos.snuba.v1.trace_item_filter_pb2 import ComparisonFilter, TraceItemFilter
+from sentry_protos.snuba.v1.trace_item_filter_pb2 import (
+    ComparisonFilter,
+    ExistsFilter,
+    TraceItemFilter,
+)
 
 from sentry.search.eap.columns import ArgumentDefinition, FormulaDefinition, ResolvedArguments
 from sentry.search.eap.constants import RESPONSE_CODE_MAP
+from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio
 from sentry.search.eap.utils import literal_validator
 
 """
@@ -64,6 +69,34 @@ def failure_rate(_: ResolvedArguments) -> Column.BinaryFormula:
     )
 
 
+def opportunity_score(args: ResolvedArguments) -> Column.BinaryFormula:
+    score_attribute = cast(AttributeKey, args[0])
+    ratio_attribute = transform_vital_score_to_ratio([score_attribute])
+
+    # TODO: We should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202
+    return Column.BinaryFormula(
+        left=Column(
+            conditional_aggregation=AttributeConditionalAggregation(
+                aggregate=Function.FUNCTION_COUNT,
+                filter=TraceItemFilter(
+                    exists_filter=ExistsFilter(key=ratio_attribute),
+                ),
+                key=ratio_attribute,
+            )
+        ),
+        op=Column.BinaryFormula.OP_SUBTRACT,
+        right=Column(
+            conditional_aggregation=AttributeConditionalAggregation(
+                aggregate=Function.FUNCTION_SUM,
+                filter=TraceItemFilter(
+                    exists_filter=ExistsFilter(key=ratio_attribute),
+                ),
+                key=ratio_attribute,
+            )
+        ),
+    )
+
+
 def http_response_rate(args: ResolvedArguments) -> Column.BinaryFormula:
     code = cast(Literal[1, 2, 3, 4, 5], args[0])
 
@@ -273,4 +306,18 @@ def ttid_contribution_rate(args: ResolvedArguments) -> Column.BinaryFormula:
         formula_resolver=ttid_contribution_rate,
         is_aggregate=True,
     ),
+    "opportunity_score": FormulaDefinition(
+        default_search_type="percentage",
+        arguments=[
+            ArgumentDefinition(
+                argument_types={
+                    "duration",
+                    "number",
+                },
+                validator=literal_validator(WEB_VITALS_MEASUREMENTS),
+            ),
+        ],
+        formula_resolver=opportunity_score,
+        is_aggregate=True,
+    ),
 }
```
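Both sides of the `BinaryFormula` above filter on an `ExistsFilter` for the ratio attribute, so spans that don't report the vital contribute to neither the COUNT nor the SUM. A small in-memory analogue, with plain dicts standing in for trace items (not Sentry's API):

```python
# Hedged sketch: emulates the exists-filtered COUNT/SUM pair from opportunity_score.
def emulate_opportunity_score(spans: list[dict[str, float]], ratio_key: str) -> float:
    present = [s[ratio_key] for s in spans if ratio_key in s]  # exists_filter analogue
    return len(present) - sum(present)  # COUNT(ratio) - SUM(ratio)

# e.g. spans without "score.ratio.lcp" are ignored entirely:
# emulate_opportunity_score(
#     [{"score.ratio.lcp": 0.1}, {"score.ratio.lcp": 1.0}, {}], "score.ratio.lcp"
# ) -> 0.9 (up to float rounding)
```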

src/sentry/search/eap/spans/utils.py

+24 −1

```diff
@@ -1,5 +1,8 @@
-from typing import Literal
+from typing import Literal, cast
 
+from sentry_protos.snuba.v1.trace_item_attribute_pb2 import AttributeKey
+
+from sentry.search.eap.columns import ResolvedArguments
 from sentry.search.eap.spans.attributes import SPAN_ATTRIBUTE_DEFINITIONS
 
 INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS: dict[Literal["string", "number"], dict[str, str]] = {
@@ -26,3 +29,23 @@ def translate_internal_to_public_alias(
 ) -> str | None:
     mappings = INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS.get(type, {})
     return mappings.get(internal_alias)
+
+
+def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
+    score_attribute = cast(AttributeKey, args[0])
+    score_name = score_attribute.name
+
+    ratio_score_name = score_name.replace("score", "score.ratio")
+    if ratio_score_name == "score.ratio.total":
+        ratio_score_name = "score.total"
+    return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE)
+
+
+WEB_VITALS_MEASUREMENTS = [
+    "measurements.score.total",
+    "measurements.score.lcp",
+    "measurements.score.fcp",
+    "measurements.score.cls",
+    "measurements.score.ttfb",
+    "measurements.score.inp",
+]
```
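`transform_vital_score_to_ratio` is a pure rename on the attribute: a vital's `score.*` attribute is mapped to its `score.ratio.*` counterpart, except for the total score, which stays `score.total`. A string-level illustration (the real helper takes `ResolvedArguments` and returns an `AttributeKey` of `TYPE_DOUBLE`; the example names assume the internal attributes are spelled as in the diff):

```python
def to_ratio_name(score_name: str) -> str:
    ratio_name = score_name.replace("score", "score.ratio")
    # there is no ratio counterpart for the total score, so it maps to itself
    return "score.total" if ratio_name == "score.ratio.total" else ratio_name

assert to_ratio_name("score.lcp") == "score.ratio.lcp"
assert to_ratio_name("score.total") == "score.total"
```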

tests/snuba/api/endpoints/test_organization_events_span_indexed.py

+54

```diff
@@ -5,6 +5,7 @@
 import pytest
 import urllib3
 
+from sentry.search.events.constants import WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS
 from sentry.testutils.helpers import parse_link_header
 from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase
 
@@ -3057,6 +3058,59 @@ def test_performance_score(self):
         assert data[0]["performance_score(measurements.score.lcp)"] == 0.06
         assert meta["dataset"] == self.dataset
 
+    def test_opportunity_score(self):
+        self.store_spans(
+            [
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 0.1},
+                            "score.ratio.fcp": {"value": 0.57142857142},
+                            "score.total": {"value": 0.43},
+                        }
+                    }
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 1.0},
+                            "score.total": {"value": 1.0},
+                        }
+                    }
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.total": {"value": 0.0},
+                        }
+                    }
+                ),
+            ],
+            is_eap=self.is_eap,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "opportunity_score(measurements.score.lcp)",
+                    "opportunity_score(measurements.score.total)",
+                ],
+                "project": self.project.id,
+                "dataset": self.dataset,
+            }
+        )
+
+        lcp_score = (
+            0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"]
+        )  # TODO: we should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202
+        assert response.status_code == 200, response.content
+        data = response.data["data"]
+        meta = response.data["meta"]
+        assert len(data) == 1
+        self.assertAlmostEqual(data[0]["opportunity_score(measurements.score.lcp)"], lcp_score)
+        assert data[0]["opportunity_score(measurements.score.total)"] == 1.57
+        assert meta["dataset"] == self.dataset
+
     @pytest.mark.skip(reason="replay id alias not migrated over")
     def test_replay_id(self):
         super().test_replay_id()
```

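The expected values in `test_opportunity_score` follow directly from the count-minus-sum formula over the stored spans. Only the LCP assertion involves a weight, and only because the test expresses it as `0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"]`, which matches the unweighted 0.9 only if that weight is 0.30. A worked sketch of the arithmetic (illustrative, not test code):

```python
lcp_ratios = [0.1, 1.0]      # two spans report score.ratio.lcp
lcp_expected = len(lcp_ratios) - sum(lcp_ratios)    # 2 - 1.1 = 0.9
# the test writes this as 0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"]

totals = [0.43, 1.0, 0.0]    # all three spans report score.total
total_expected = len(totals) - sum(totals)          # 3 - 1.43 = 1.57
```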