From d661d86fdcd9daced23d20e5a50f36d181c85dc9 Mon Sep 17 00:00:00 2001 From: Dominik Buszowiecki Date: Tue, 18 Mar 2025 15:19:31 -0400 Subject: [PATCH] implement opportunity_score function (mostly) --- src/sentry/search/eap/spans/aggregates.py | 20 +------ src/sentry/search/eap/spans/formulas.py | 49 ++++++++++++++++- src/sentry/search/eap/spans/utils.py | 25 ++++++++- .../test_organization_events_span_indexed.py | 54 +++++++++++++++++++ 4 files changed, 127 insertions(+), 21 deletions(-) diff --git a/src/sentry/search/eap/spans/aggregates.py b/src/sentry/search/eap/spans/aggregates.py index 9498e9e0c9fc5d..156aa0da51be84 100644 --- a/src/sentry/search/eap/spans/aggregates.py +++ b/src/sentry/search/eap/spans/aggregates.py @@ -14,17 +14,9 @@ ConditionalAggregateDefinition, ResolvedArguments, ) +from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio from sentry.search.eap.utils import literal_validator -WEB_VITALS_MEASUREMENTS = [ - "measurements.score.total", - "measurements.score.lcp", - "measurements.score.fcp", - "measurements.score.cls", - "measurements.score.ttfb", - "measurements.score.inp", -] - def count_processor(count_value: int | None) -> int: if count_value is None: @@ -73,16 +65,6 @@ def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceIt return (ratio_attribute, filter) -def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey: - score_attribute = cast(AttributeKey, args[0]) - score_name = score_attribute.name - - ratio_score_name = score_name.replace("score", "score.ratio") - if ratio_score_name == "score.ratio.total": - ratio_score_name = "score.total" - return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE) - - SPAN_CONDITIONAL_AGGREGATE_DEFINITIONS = { "count_op": ConditionalAggregateDefinition( internal_function=Function.FUNCTION_COUNT, diff --git a/src/sentry/search/eap/spans/formulas.py b/src/sentry/search/eap/spans/formulas.py index 3fc3987914b4f7..59e4ac9d81cc85 100644 --- a/src/sentry/search/eap/spans/formulas.py +++ b/src/sentry/search/eap/spans/formulas.py @@ -12,10 +12,15 @@ Function, StrArray, ) -from sentry_protos.snuba.v1.trace_item_filter_pb2 import ComparisonFilter, TraceItemFilter +from sentry_protos.snuba.v1.trace_item_filter_pb2 import ( + ComparisonFilter, + ExistsFilter, + TraceItemFilter, +) from sentry.search.eap.columns import ArgumentDefinition, FormulaDefinition, ResolvedArguments from sentry.search.eap.constants import RESPONSE_CODE_MAP +from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio from sentry.search.eap.utils import literal_validator """ @@ -32,6 +37,34 @@ ) +def opportunity_score(args: ResolvedArguments) -> Column.BinaryFormula: + score_attribute = cast(AttributeKey, args[0]) + ratio_attribute = transform_vital_score_to_ratio([score_attribute]) + + # TODO: We should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202 + return Column.BinaryFormula( + left=Column( + conditional_aggregation=AttributeConditionalAggregation( + aggregate=Function.FUNCTION_COUNT, + filter=TraceItemFilter( + exists_filter=ExistsFilter(key=ratio_attribute), + ), + key=ratio_attribute, + ) + ), + op=Column.BinaryFormula.OP_SUBTRACT, + right=Column( + conditional_aggregation=AttributeConditionalAggregation( + aggregate=Function.FUNCTION_SUM, + filter=TraceItemFilter( + exists_filter=ExistsFilter(key=ratio_attribute), + ), + key=ratio_attribute, + ) + ), 
+    )
+
+
 def http_response_rate(args: ResolvedArguments) -> Column.BinaryFormula:
     code = cast(Literal[1, 2, 3, 4, 5], args[0])
 
@@ -235,4 +268,18 @@ def ttid_contribution_rate(args: ResolvedArguments) -> Column.BinaryFormula:
         formula_resolver=ttid_contribution_rate,
         is_aggregate=True,
     ),
+    "opportunity_score": FormulaDefinition(
+        default_search_type="percentage",
+        arguments=[
+            ArgumentDefinition(
+                argument_types={
+                    "duration",
+                    "number",
+                },
+                validator=literal_validator(WEB_VITALS_MEASUREMENTS),
+            ),
+        ],
+        formula_resolver=opportunity_score,
+        is_aggregate=True,
+    ),
 }
diff --git a/src/sentry/search/eap/spans/utils.py b/src/sentry/search/eap/spans/utils.py
index 37636cc7456925..d8e6ac78a9631a 100644
--- a/src/sentry/search/eap/spans/utils.py
+++ b/src/sentry/search/eap/spans/utils.py
@@ -1,5 +1,8 @@
-from typing import Literal
+from typing import Literal, cast
 
+from sentry_protos.snuba.v1.trace_item_attribute_pb2 import AttributeKey
+
+from sentry.search.eap.columns import ResolvedArguments
 from sentry.search.eap.spans.attributes import SPAN_ATTRIBUTE_DEFINITIONS
 
 INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS: dict[Literal["string", "number"], dict[str, str]] = {
@@ -26,3 +29,23 @@ def translate_internal_to_public_alias(
 ) -> str | None:
     mappings = INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS.get(type, {})
     return mappings.get(internal_alias)
+
+
+def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
+    score_attribute = cast(AttributeKey, args[0])
+    score_name = score_attribute.name
+
+    ratio_score_name = score_name.replace("score", "score.ratio")
+    if ratio_score_name == "score.ratio.total":
+        ratio_score_name = "score.total"
+    return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE)
+
+
+WEB_VITALS_MEASUREMENTS = [
+    "measurements.score.total",
+    "measurements.score.lcp",
+    "measurements.score.fcp",
+    "measurements.score.cls",
+    "measurements.score.ttfb",
+    "measurements.score.inp",
+]
diff --git a/tests/snuba/api/endpoints/test_organization_events_span_indexed.py b/tests/snuba/api/endpoints/test_organization_events_span_indexed.py
index 1100a1949d65a8..58c00d9c322c46 100644
--- a/tests/snuba/api/endpoints/test_organization_events_span_indexed.py
+++ b/tests/snuba/api/endpoints/test_organization_events_span_indexed.py
@@ -5,6 +5,7 @@
 import pytest
 import urllib3
 
+from sentry.search.events.constants import WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS
 from sentry.testutils.helpers import parse_link_header
 from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase
 
@@ -3029,6 +3030,59 @@ def test_performance_score(self):
         assert data[0]["performance_score(measurements.score.lcp)"] == 0.06
         assert meta["dataset"] == self.dataset
 
+    def test_opportunity_score(self):
+        self.store_spans(
+            [
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 0.1},
+                            "score.ratio.fcp": {"value": 0.57142857142},
+                            "score.total": {"value": 0.43},
+                        }
+                    }
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.ratio.lcp": {"value": 1.0},
+                            "score.total": {"value": 1.0},
+                        }
+                    }
+                ),
+                self.create_span(
+                    {
+                        "measurements": {
+                            "score.total": {"value": 0.0},
+                        }
+                    }
+                ),
+            ],
+            is_eap=self.is_eap,
+        )
+
+        response = self.do_request(
+            {
+                "field": [
+                    "opportunity_score(measurements.score.lcp)",
+                    "opportunity_score(measurements.score.total)",
+                ],
+                "project": self.project.id,
+                "dataset": self.dataset,
+            }
+        )
+
+        lcp_score = (
+            0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"]
+        )  # TODO: we should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202
+        assert response.status_code == 200, response.content
+        data = response.data["data"]
+        meta = response.data["meta"]
+        assert len(data) == 1
+        self.assertAlmostEqual(data[0]["opportunity_score(measurements.score.lcp)"], lcp_score)
+        assert data[0]["opportunity_score(measurements.score.total)"] == 1.57
+        assert meta["dataset"] == self.dataset
+
     @pytest.mark.skip(reason="replay id alias not migrated over")
     def test_replay_id(self):
         super().test_replay_id()
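
Reviewer note: the BinaryFormula in opportunity_score encodes the metric as COUNT(ratio) - SUM(ratio), with both sides restricted by an ExistsFilter to spans that actually carry the ratio attribute. A minimal pure-Python sketch of that arithmetic, checked against the fixtures in test_opportunity_score (opportunity_score_reference is an illustrative name, not part of this patch):

    def opportunity_score_reference(ratios: list[float | None]) -> float:
        # Mirrors the BinaryFormula: COUNT(ratio) - SUM(ratio). Spans that
        # lack the ratio attribute (None here) are excluded on both sides,
        # matching the ExistsFilter in the conditional aggregations.
        present = [r for r in ratios if r is not None]
        return len(present) - sum(present)

    # score.ratio.lcp: two of the three stored spans carry it -> 2 - 1.1 = 0.9
    assert round(opportunity_score_reference([0.1, 1.0, None]), 2) == 0.9
    # score.total: all three spans carry it -> 3 - 1.43 = 1.57
    assert round(opportunity_score_reference([0.43, 1.0, 0.0]), 2) == 1.57

The test writes the expected LCP value as 0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"], which equals the unweighted 0.9 only if the LCP weight is 0.30; per the TODOs, that division is a stopgap until the formula itself can multiply by the weight (eap-planning issue 202).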
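
The score-to-ratio attribute rewrite shared by resolve_count_scores and the new formula is a plain string substitution with one special case. A standalone sketch of the name mapping implemented in spans/utils.py (to_ratio_name is an illustrative stand-in for transform_vital_score_to_ratio, which returns an AttributeKey rather than a string):

    def to_ratio_name(score_name: str) -> str:
        # "score.lcp" -> "score.ratio.lcp", and likewise for fcp/cls/ttfb/inp.
        # The total score is special-cased back to "score.total": as the test
        # fixtures suggest, it is already stored as a 0-1 value, so no
        # separate ratio attribute exists for it.
        ratio_name = score_name.replace("score", "score.ratio")
        return "score.total" if ratio_name == "score.ratio.total" else ratio_name

    assert to_ratio_name("score.lcp") == "score.ratio.lcp"
    assert to_ratio_name("score.total") == "score.total"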