Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat(eap-spans): implement opportunity_score function (mostly) #87313

Merged
merged 2 commits into from
Mar 19, 2025
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
20 changes: 1 addition & 19 deletions src/sentry/search/eap/spans/aggregates.py
Original file line number Diff line number Diff line change
Expand Up @@ -14,17 +14,9 @@
ConditionalAggregateDefinition,
ResolvedArguments,
)
from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio
from sentry.search.eap.utils import literal_validator

WEB_VITALS_MEASUREMENTS = [
"measurements.score.total",
"measurements.score.lcp",
"measurements.score.fcp",
"measurements.score.cls",
"measurements.score.ttfb",
"measurements.score.inp",
]


def count_processor(count_value: int | None) -> int:
if count_value is None:
Expand Down Expand Up @@ -73,16 +65,6 @@ def resolve_count_scores(args: ResolvedArguments) -> tuple[AttributeKey, TraceIt
return (ratio_attribute, filter)


def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
    """Map a web-vital score attribute to its ratio-valued counterpart.

    e.g. ``score.lcp`` -> ``score.ratio.lcp``. The resulting AttributeKey is
    typed DOUBLE since ratio scores are stored as floats in [0, 1].
    """
    score_attribute = cast(AttributeKey, args[0])
    score_name = score_attribute.name

    ratio_score_name = score_name.replace("score", "score.ratio")
    # The total score is already stored normalized, so it has no ".ratio"
    # variant — fall back to the plain total attribute.
    if ratio_score_name == "score.ratio.total":
        ratio_score_name = "score.total"
    return AttributeKey(name=ratio_score_name, type=AttributeKey.TYPE_DOUBLE)


SPAN_CONDITIONAL_AGGREGATE_DEFINITIONS = {
"count_op": ConditionalAggregateDefinition(
internal_function=Function.FUNCTION_COUNT,
Expand Down
49 changes: 48 additions & 1 deletion src/sentry/search/eap/spans/formulas.py
Original file line number Diff line number Diff line change
Expand Up @@ -12,10 +12,15 @@
Function,
StrArray,
)
from sentry_protos.snuba.v1.trace_item_filter_pb2 import ComparisonFilter, TraceItemFilter
from sentry_protos.snuba.v1.trace_item_filter_pb2 import (
ComparisonFilter,
ExistsFilter,
TraceItemFilter,
)

from sentry.search.eap.columns import ArgumentDefinition, FormulaDefinition, ResolvedArguments
from sentry.search.eap.constants import RESPONSE_CODE_MAP
from sentry.search.eap.spans.utils import WEB_VITALS_MEASUREMENTS, transform_vital_score_to_ratio
from sentry.search.eap.utils import literal_validator

"""
Expand Down Expand Up @@ -64,6 +69,34 @@ def failure_rate(_: ResolvedArguments) -> Column.BinaryFormula:
)


def opportunity_score(args: ResolvedArguments) -> Column.BinaryFormula:
    """Resolve ``opportunity_score(measurements.score.X)`` to an EAP formula.

    Computes ``count(ratio) - sum(ratio)`` over the ratio-valued score
    attribute, i.e. the total score that was "lost" across all spans that
    reported the vital (equivalent to ``sum(1 - ratio)``).
    """
    score_attribute = cast(AttributeKey, args[0])
    ratio_attribute = transform_vital_score_to_ratio([score_attribute])

    # Only aggregate over spans that actually recorded this vital; protobuf
    # constructors copy sub-messages, so the filter can be shared safely.
    has_ratio = TraceItemFilter(exists_filter=ExistsFilter(key=ratio_attribute))

    count_of_ratios = Column(
        conditional_aggregation=AttributeConditionalAggregation(
            aggregate=Function.FUNCTION_COUNT,
            filter=has_ratio,
            key=ratio_attribute,
        )
    )
    sum_of_ratios = Column(
        conditional_aggregation=AttributeConditionalAggregation(
            aggregate=Function.FUNCTION_SUM,
            filter=has_ratio,
            key=ratio_attribute,
        )
    )

    # TODO: We should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202
    return Column.BinaryFormula(
        left=count_of_ratios,
        op=Column.BinaryFormula.OP_SUBTRACT,
        right=sum_of_ratios,
    )


def http_response_rate(args: ResolvedArguments) -> Column.BinaryFormula:
code = cast(Literal[1, 2, 3, 4, 5], args[0])

Expand Down Expand Up @@ -273,4 +306,18 @@ def ttid_contribution_rate(args: ResolvedArguments) -> Column.BinaryFormula:
formula_resolver=ttid_contribution_rate,
is_aggregate=True,
),
"opportunity_score": FormulaDefinition(
default_search_type="percentage",
arguments=[
ArgumentDefinition(
argument_types={
"duration",
"number",
},
validator=literal_validator(WEB_VITALS_MEASUREMENTS),
),
],
formula_resolver=opportunity_score,
is_aggregate=True,
),
}
25 changes: 24 additions & 1 deletion src/sentry/search/eap/spans/utils.py
Original file line number Diff line number Diff line change
@@ -1,5 +1,8 @@
from typing import Literal
from typing import Literal, cast

from sentry_protos.snuba.v1.trace_item_attribute_pb2 import AttributeKey

from sentry.search.eap.columns import ResolvedArguments
from sentry.search.eap.spans.attributes import SPAN_ATTRIBUTE_DEFINITIONS

INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS: dict[Literal["string", "number"], dict[str, str]] = {
Expand All @@ -26,3 +29,23 @@ def translate_internal_to_public_alias(
) -> str | None:
mappings = INTERNAL_TO_PUBLIC_ALIAS_MAPPINGS.get(type, {})
return mappings.get(internal_alias)


def transform_vital_score_to_ratio(args: ResolvedArguments) -> AttributeKey:
    """Map a web-vital score attribute to its ratio-valued counterpart.

    e.g. ``score.lcp`` becomes ``score.ratio.lcp``; the total score has no
    ratio variant, so it maps back onto ``score.total``. Ratio attributes
    are stored as doubles in [0, 1].
    """
    score_key = cast(AttributeKey, args[0])
    ratio_name = score_key.name.replace("score", "score.ratio")

    # Special case: the total score is already normalized and has no
    # separate ".ratio" attribute.
    if ratio_name == "score.ratio.total":
        ratio_name = "score.total"

    return AttributeKey(name=ratio_name, type=AttributeKey.TYPE_DOUBLE)


# Public aliases for the stored web-vital score measurements. Used as the
# allowed-argument list (via literal_validator) for score-based functions
# such as opportunity_score.
WEB_VITALS_MEASUREMENTS = [
    "measurements.score.total",
    "measurements.score.lcp",
    "measurements.score.fcp",
    "measurements.score.cls",
    "measurements.score.ttfb",
    "measurements.score.inp",
]
Original file line number Diff line number Diff line change
Expand Up @@ -5,6 +5,7 @@
import pytest
import urllib3

from sentry.search.events.constants import WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS
from sentry.testutils.helpers import parse_link_header
from tests.snuba.api.endpoints.test_organization_events import OrganizationEventsEndpointTestBase

Expand Down Expand Up @@ -3057,6 +3058,59 @@ def test_performance_score(self):
assert data[0]["performance_score(measurements.score.lcp)"] == 0.06
assert meta["dataset"] == self.dataset

def test_opportunity_score(self):
    """opportunity_score() sums the lost score (count - sum of ratios).

    Stores three spans: one with partial lcp/fcp ratios, one with a perfect
    lcp ratio, and one with only a total score, then queries both a
    per-vital and the total opportunity score.
    """
    self.store_spans(
        [
            self.create_span(
                {
                    "measurements": {
                        "score.ratio.lcp": {"value": 0.1},
                        "score.ratio.fcp": {"value": 0.57142857142},
                        "score.total": {"value": 0.43},
                    }
                }
            ),
            self.create_span(
                {
                    "measurements": {
                        "score.ratio.lcp": {"value": 1.0},
                        "score.total": {"value": 1.0},
                    }
                }
            ),
            # Span with no per-vital ratios — only contributes to the
            # total opportunity score.
            self.create_span(
                {
                    "measurements": {
                        "score.total": {"value": 0.0},
                    }
                }
            ),
        ],
        is_eap=self.is_eap,
    )

    response = self.do_request(
        {
            "field": [
                "opportunity_score(measurements.score.lcp)",
                "opportunity_score(measurements.score.total)",
            ],
            "project": self.project.id,
            "dataset": self.dataset,
        }
    )

    # Lost lcp score across the two spans reporting it: (1-0.1) + (1-1.0)
    # = 0.9, weighted here by dividing out the lcp weight.
    lcp_score = (
        0.27 / WEB_VITALS_PERFORMANCE_SCORE_WEIGHTS["lcp"]
    )  # TODO: we should be multiplying by the weight in the formula, but we can't until https://github.com/getsentry/eap-planning/issues/202
    assert response.status_code == 200, response.content
    data = response.data["data"]
    meta = response.data["meta"]
    assert len(data) == 1
    self.assertAlmostEqual(data[0]["opportunity_score(measurements.score.lcp)"], lcp_score)
    assert data[0]["opportunity_score(measurements.score.total)"] == 1.57
    assert meta["dataset"] == self.dataset

@pytest.mark.skip(reason="replay id alias not migrated over")
def test_replay_id(self):
super().test_replay_id()
Expand Down
Loading