diff --git a/snuba/query/processors/logical/timeseries_processor.py b/snuba/query/processors/logical/timeseries_processor.py
index 55b4dabf55..9d8e07da9f 100644
--- a/snuba/query/processors/logical/timeseries_processor.py
+++ b/snuba/query/processors/logical/timeseries_processor.py
@@ -241,6 +241,8 @@ def extract_granularity_from_query(query: Query, column: str) -> Optional[int]:
         ),
     )
 
+    print("expr_match", expr_match)
+
     for top_expr in groupby:
         for expr in top_expr:
             result = fn_match.match(expr)
diff --git a/snuba/web/rpc/v1/endpoint_time_series.py b/snuba/web/rpc/v1/endpoint_time_series.py
index 8ffd8dbfb4..3e9d18e9e0 100644
--- a/snuba/web/rpc/v1/endpoint_time_series.py
+++ b/snuba/web/rpc/v1/endpoint_time_series.py
@@ -15,6 +15,7 @@
 from snuba.attribution.appid import AppID
 from snuba.attribution.attribution_info import AttributionInfo
+from snuba.cli import start
 from snuba.datasets.entities.entity_key import EntityKey
 from snuba.datasets.entities.factory import get_entity
 from snuba.datasets.pluggable_dataset import PluggableDataset
@@ -58,6 +59,10 @@
 _MAX_BUCKETS_IN_REQUEST = 1000
 
 
+def _rewind(start_timestamp: int, granularity: int) -> int:
+    return (start_timestamp // granularity) * granularity
+
+
 def _convert_result_timeseries(
     request: TimeSeriesRequest, data: list[Dict[str, Any]]
 ) -> Iterable[TimeSeries]:
@@ -124,10 +129,16 @@ def _convert_result_timeseries(
     query_duration = (
         request.meta.end_timestamp.seconds - request.meta.start_timestamp.seconds
     )
+    # start_timestamp_seconds = _rewind(
+    #     request.meta.start_timestamp.seconds, granularity=request.granularity_secs
+    # )
+    start_timestamp_seconds = request.meta.start_timestamp.seconds
     time_buckets = [
-        Timestamp(seconds=(request.meta.start_timestamp.seconds) + secs)
+        Timestamp(seconds=start_timestamp_seconds + secs)
         for secs in range(0, query_duration, request.granularity_secs)
     ]
+    print("request", request)
+    print("data", data)
 
     # this loop fill in our pre-computed dictionaries so that we can zerofill later
     for row in data:
diff --git a/tests/web/rpc/v1/test_endpoint_time_series.py b/tests/web/rpc/v1/test_endpoint_time_series.py
index 82d15efbaa..4578975fe5 100644
--- a/tests/web/rpc/v1/test_endpoint_time_series.py
+++ b/tests/web/rpc/v1/test_endpoint_time_series.py
@@ -165,6 +165,7 @@ def test_basic(self) -> None:
         assert response.status_code == 200, (error.message, error.details)
 
     def test_sum(self) -> None:
+        print(BASE_TIME.timestamp())
         # store a a test metric with a value of 1, every second of one hour
         granularity_secs = 300
         query_duration = 60 * 30
@@ -225,6 +226,72 @@ def test_sum(self) -> None:
                 ],
             ),
         ]
+        assert False
+
+    def test_rachel(self) -> None:
+        start_timestamp_seconds = 1725892950
+        # store a test metric with a value of 1, every second of one hour
+        granularity_secs = 15
+        query_duration = 60 * 30
+        store_timeseries(
+            datetime.fromtimestamp(start_timestamp_seconds, tz=UTC),
+            1,
+            3600,
+            metrics=[DummyMetric("test_metric", get_value=lambda x: 1)],
+        )
+
+        message = TimeSeriesRequest(
+            meta=RequestMeta(
+                project_ids=[1, 2, 3],
+                organization_id=1,
+                cogs_category="something",
+                referrer="something",
+                start_timestamp=Timestamp(seconds=start_timestamp_seconds),
+                end_timestamp=Timestamp(
+                    seconds=int(start_timestamp_seconds + query_duration)
+                ),
+            ),
+            aggregations=[
+                AttributeAggregation(
+                    aggregate=Function.FUNCTION_SUM,
+                    key=AttributeKey(type=AttributeKey.TYPE_FLOAT, name="test_metric"),
+                    label="sum",
+                    extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_NONE,
+                ),
+                AttributeAggregation(
+                    aggregate=Function.FUNCTION_AVG,
+                    key=AttributeKey(type=AttributeKey.TYPE_FLOAT, name="test_metric"),
+                    label="avg",
+                    extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_NONE,
+                ),
+            ],
+            granularity_secs=granularity_secs,
+        )
+        response = EndpointTimeSeries().execute(message)
+        expected_buckets = [
+            Timestamp(seconds=int(BASE_TIME.timestamp()) + secs)
+            for secs in range(0, query_duration, granularity_secs)
+        ]
+        assert sorted(response.result_timeseries, key=lambda x: x.label) == [
+            TimeSeries(
+                label="avg",
+                buckets=expected_buckets,
+                data_points=[
+                    DataPoint(data=1, data_present=True)
+                    for _ in range(len(expected_buckets))
+                ],
+            ),
+            TimeSeries(
+                label="sum",
+                buckets=expected_buckets,
+                data_points=[
+                    DataPoint(data=300, data_present=True)
+                    for _ in range(len(expected_buckets))
+                ],
+            ),
+        ]
+
+        assert False
 
     def test_with_group_by(self) -> None:
         store_timeseries(
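
For context on what the commented-out _rewind call above is exploring: _convert_result_timeseries generates its zerofill time buckets from request.meta.start_timestamp.seconds, so a start timestamp that is not a multiple of granularity_secs produces buckets that do not line up with the rollup boundaries of the stored data. The snippet below is a minimal standalone sketch of that alignment idea, not part of the patch; the constants and the bare rewind helper are illustrative only and simply mirror the formula of the _rewind function added in the diff.

# Standalone sketch (not part of the patch): snap a query start timestamp
# down to a granularity boundary, mirroring the _rewind helper in the diff.
def rewind(start_timestamp: int, granularity: int) -> int:
    # Integer division floors to the previous multiple of `granularity`.
    return (start_timestamp // granularity) * granularity

granularity_secs = 300
start = 1725892950                         # 2024-09-09 14:42:30 UTC, not a multiple of 300
aligned = rewind(start, granularity_secs)  # 1725892800 -> 14:40:00 UTC

# Zerofill buckets over a 30-minute window, one per granularity step.
query_duration = 60 * 30
buckets = [aligned + secs for secs in range(0, query_duration, granularity_secs)]
assert buckets[0] % granularity_secs == 0

In the patch itself the rewind is still commented out, so time_buckets continue to start at the raw request.meta.start_timestamp.seconds.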