reproduce #6588

Draft · wants to merge 2 commits into master
2 changes: 2 additions & 0 deletions snuba/query/processors/logical/timeseries_processor.py
@@ -241,6 +241,8 @@ def extract_granularity_from_query(query: Query, column: str) -> Optional[int]:
        ),
    )

    print("expr_match", expr_match)

    for top_expr in groupby:
        for expr in top_expr:
            result = fn_match.match(expr)
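For context, extract_granularity_from_query works by pattern-matching the query's groupby expressions, and the added print surfaces whether the rounding pattern (expr_match) was matched at all. Loosely, the shape it is hunting for amounts to rounding the timestamp column down to an N-second bucket; the sketch below is plain Python for illustration only, not Snuba's matcher API, and the numbers are made up.

# Illustrative only: the groupby pattern being matched is equivalent to
# rounding a timestamp down to an N-second bucket, so finding it reveals
# the query's granularity N.
def bucket(timestamp: int, n: int) -> int:
    # intDiv(timestamp, n) * n, in ClickHouse terms
    return (timestamp // n) * n

assert bucket(1725892950, 300) == 1725892800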
13 changes: 12 additions & 1 deletion snuba/web/rpc/v1/endpoint_time_series.py
@@ -15,6 +15,7 @@

from snuba.attribution.appid import AppID
from snuba.attribution.attribution_info import AttributionInfo
from snuba.cli import start
from snuba.datasets.entities.entity_key import EntityKey
from snuba.datasets.entities.factory import get_entity
from snuba.datasets.pluggable_dataset import PluggableDataset
@@ -58,6 +59,10 @@
_MAX_BUCKETS_IN_REQUEST = 1000


def _rewind(start_timestamp: int, granularity: int) -> int:
    # Round the start timestamp down to the nearest granularity boundary.
    return (start_timestamp // granularity) * granularity


def _convert_result_timeseries(
    request: TimeSeriesRequest, data: list[Dict[str, Any]]
) -> Iterable[TimeSeries]:
@@ -124,10 +129,16 @@ def _convert_result_timeseries(
    query_duration = (
        request.meta.end_timestamp.seconds - request.meta.start_timestamp.seconds
    )
    # start_timestamp_seconds = _rewind(
    #     request.meta.start_timestamp.seconds, granularity=request.granularity_secs
    # )
    start_timestamp_seconds = request.meta.start_timestamp.seconds
    time_buckets = [
        Timestamp(seconds=(request.meta.start_timestamp.seconds) + secs)
        Timestamp(seconds=start_timestamp_seconds + secs)
        for secs in range(0, query_duration, request.granularity_secs)
    ]
    print("request", request)
    print("data", data)

    # this loop fills in our pre-computed dictionaries so that we can zerofill later
    for row in data:
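The commented-out _rewind call above captures the idea being explored: snap the request's start timestamp down to a granularity boundary before generating the zerofill time buckets. A minimal sketch of the effect, using the same timestamp as the new test and an assumed 300-second granularity:

# Minimal sketch of the bucket-alignment idea (values are illustrative).
def _rewind(start_timestamp: int, granularity: int) -> int:
    # Round the start timestamp down to the nearest granularity boundary.
    return (start_timestamp // granularity) * granularity

start = 1725892950            # not aligned to a 300-second granularity
granularity = 300
aligned = _rewind(start, granularity)  # 1725892800

raw_buckets = [start + s for s in range(0, 900, granularity)]
aligned_buckets = [aligned + s for s in range(0, 900, granularity)]
print(raw_buckets)      # [1725892950, 1725893250, 1725893550]
print(aligned_buckets)  # [1725892800, 1725893100, 1725893400]
# If the stored rows come back keyed to rounded bucket starts, only the
# aligned buckets line up with them during zerofill.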
67 changes: 67 additions & 0 deletions tests/web/rpc/v1/test_endpoint_time_series.py
@@ -165,6 +165,7 @@ def test_basic(self) -> None:
        assert response.status_code == 200, (error.message, error.details)

    def test_sum(self) -> None:
        print(BASE_TIME.timestamp())
        # store a test metric with a value of 1, every second of one hour
        granularity_secs = 300
        query_duration = 60 * 30
@@ -225,6 +226,72 @@ def test_sum(self) -> None:
                ],
            ),
        ]
        assert False

    def test_rachel(self) -> None:
        start_timestamp_seconds = 1725892950
        # store a test metric with a value of 1, every second of one hour
        granularity_secs = 15
        query_duration = 60 * 30
        store_timeseries(
            datetime.fromtimestamp(start_timestamp_seconds, tz=UTC),
            1,
            3600,
            metrics=[DummyMetric("test_metric", get_value=lambda x: 1)],
        )

        message = TimeSeriesRequest(
            meta=RequestMeta(
                project_ids=[1, 2, 3],
                organization_id=1,
                cogs_category="something",
                referrer="something",
                start_timestamp=Timestamp(seconds=start_timestamp_seconds),
                end_timestamp=Timestamp(
                    seconds=int(start_timestamp_seconds + query_duration)
                ),
            ),
            aggregations=[
                AttributeAggregation(
                    aggregate=Function.FUNCTION_SUM,
                    key=AttributeKey(type=AttributeKey.TYPE_FLOAT, name="test_metric"),
                    label="sum",
                    extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_NONE,
                ),
                AttributeAggregation(
                    aggregate=Function.FUNCTION_AVG,
                    key=AttributeKey(type=AttributeKey.TYPE_FLOAT, name="test_metric"),
                    label="avg",
                    extrapolation_mode=ExtrapolationMode.EXTRAPOLATION_MODE_NONE,
                ),
            ],
            granularity_secs=granularity_secs,
        )
        response = EndpointTimeSeries().execute(message)
        expected_buckets = [
            Timestamp(seconds=start_timestamp_seconds + secs)
            for secs in range(0, query_duration, granularity_secs)
        ]
        assert sorted(response.result_timeseries, key=lambda x: x.label) == [
            TimeSeries(
                label="avg",
                buckets=expected_buckets,
                data_points=[
                    DataPoint(data=1, data_present=True)
                    for _ in range(len(expected_buckets))
                ],
            ),
            TimeSeries(
                label="sum",
                buckets=expected_buckets,
                data_points=[
                    # one value of 1 per second over 15-second buckets
                    DataPoint(data=15, data_present=True)
                    for _ in range(len(expected_buckets))
                ],
            ),
        ]

        assert False

    def test_with_group_by(self) -> None:
        store_timeseries(
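For reference, the arithmetic behind test_rachel's expectations, assuming one point with value 1 is stored every second (matching the store_timeseries call in the test):

# Quick check of the numbers test_rachel implies (assumes one point of
# value 1 per second, per the store_timeseries call in the test).
start_timestamp_seconds = 1725892950
granularity_secs = 15
query_duration = 60 * 30

num_buckets = query_duration // granularity_secs    # 120 buckets of 15 s each
per_bucket_sum = granularity_secs * 1                # 15: one point per second
per_bucket_avg = 1                                   # every stored value is 1
print(num_buckets, per_bucket_sum, per_bucket_avg)   # 120 15 1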