From 0cfa67de1e553add4dfaf464f07da34b91dfc99d Mon Sep 17 00:00:00 2001
From: Patrick Schork <354473+pschork@users.noreply.github.com>
Date: Sat, 22 Jun 2024 19:09:35 -0700
Subject: [PATCH] Use auto rate interval for blob throughput metrics

---
 disperser/dataapi/metrics_handlers.go  |  2 +-
 disperser/dataapi/prometheus_client.go | 11 +++--------
 2 files changed, 4 insertions(+), 9 deletions(-)

diff --git a/disperser/dataapi/metrics_handlers.go b/disperser/dataapi/metrics_handlers.go
index 5f18ba422..66444d100 100644
--- a/disperser/dataapi/metrics_handlers.go
+++ b/disperser/dataapi/metrics_handlers.go
@@ -79,7 +79,7 @@ func (s *server) getMetric(ctx context.Context, startTime int64, endTime int64)
 }
 
 func (s *server) getThroughput(ctx context.Context, start int64, end int64) ([]*Throughput, error) {
-	result, err := s.promClient.QueryDisperserAvgThroughputBlobSizeBytes(ctx, time.Unix(start, 0), time.Unix(end, 0), avgThroughputWindowSize)
+	result, err := s.promClient.QueryDisperserAvgThroughputBlobSizeBytes(ctx, time.Unix(start, 0), time.Unix(end, 0))
 	if err != nil {
 		return nil, err
 	}
diff --git a/disperser/dataapi/prometheus_client.go b/disperser/dataapi/prometheus_client.go
index 0d9db872f..0d39599e0 100644
--- a/disperser/dataapi/prometheus_client.go
+++ b/disperser/dataapi/prometheus_client.go
@@ -11,8 +11,7 @@ import (
 
 const (
 	// maxNumOfDataPoints is the maximum number of data points that can be queried from Prometheus based on latency that this API can provide
-	maxNumOfDataPoints        = 3500
-	throughputRateWindowInSec = 60
+	maxNumOfDataPoints = 3500
 )
 
 type (
@@ -47,12 +46,8 @@ func (pc *prometheusClient) QueryDisperserBlobSizeBytesPerSecond(ctx context.Con
 	return pc.queryRange(ctx, query, start, end)
 }
 
-func (pc *prometheusClient) QueryDisperserAvgThroughputBlobSizeBytes(ctx context.Context, start time.Time, end time.Time, windowSizeInSec uint8) (*PrometheusResult, error) {
-	if windowSizeInSec < throughputRateWindowInSec {
-		windowSizeInSec = throughputRateWindowInSec
-	}
-
-	query := fmt.Sprintf("sum by (job) (rate(eigenda_batcher_blobs_total{state=\"confirmed\",data=\"size\",cluster=\"%s\"}[%ds]))", pc.cluster, windowSizeInSec)
+func (pc *prometheusClient) QueryDisperserAvgThroughputBlobSizeBytes(ctx context.Context, start time.Time, end time.Time) (*PrometheusResult, error) {
+	query := fmt.Sprintf("sum by (job) (rate(eigenda_batcher_blobs_total{state=\"confirmed\",data=\"size\",cluster=\"%s\"}[$__rate_interval]))", pc.cluster)
 	return pc.queryRange(ctx, query, start, end)
 }
 
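
For reference, a minimal standalone sketch of the query string this patch produces. The buildAvgThroughputQuery helper, the "mainnet" cluster label, and the "5m" substitution are illustrative only and not part of the patch; note that $__rate_interval is a Grafana-style variable rather than native PromQL, so this sketch assumes it is replaced with a concrete window before the query reaches Prometheus.

package main

import (
	"fmt"
	"strings"
)

// buildAvgThroughputQuery mirrors the PromQL string built in
// QueryDisperserAvgThroughputBlobSizeBytes after this patch.
func buildAvgThroughputQuery(cluster string) string {
	return fmt.Sprintf("sum by (job) (rate(eigenda_batcher_blobs_total{state=\"confirmed\",data=\"size\",cluster=\"%s\"}[$__rate_interval]))", cluster)
}

func main() {
	// "mainnet" is a hypothetical cluster label; in the real code the value comes from pc.cluster.
	query := buildAvgThroughputQuery("mainnet")

	// Substitute the Grafana-style placeholder with a concrete rate window ("5m" here)
	// to obtain a query that Prometheus itself can evaluate.
	rendered := strings.ReplaceAll(query, "$__rate_interval", "5m")
	fmt.Println(rendered)
}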