Merge pull request #353 from sapcc/cluster-scrape-timestamps
SuperSandro2000 authored Sep 12, 2023
2 parents 3df2619 + b682820 commit aeac026
Showing 15 changed files with 68 additions and 85 deletions.
4 changes: 1 addition & 3 deletions internal/api/fixtures/cluster-get-west-no-resources.json
@@ -9,8 +9,6 @@
         "max_scraped_at": 66,
         "min_scraped_at": 22
       }
-    ],
-    "max_scraped_at": 1100,
-    "min_scraped_at": 1100
+    ]
   }
 }
@@ -105,7 +105,7 @@
         "min_scraped_at": 11
       }
     ],
-    "max_scraped_at": 1200,
+    "max_scraped_at": 1100,
     "min_scraped_at": 1000
   }
 }
2 changes: 1 addition & 1 deletion internal/api/fixtures/cluster-get-west.json
@@ -101,7 +101,7 @@
         "min_scraped_at": 11
       }
     ],
-    "max_scraped_at": 1200,
+    "max_scraped_at": 1100,
     "min_scraped_at": 1000
   }
 }
4 changes: 2 additions & 2 deletions internal/api/fixtures/start-data-minimal.sql
@@ -1,8 +1,8 @@
 CREATE OR REPLACE FUNCTION unix(i integer) RETURNS timestamp AS $$ SELECT TO_TIMESTAMP(i) AT TIME ZONE 'Etc/UTC' $$ LANGUAGE SQL;
 
 -- two services, one shared, one unshared
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'unshared', UNIX(1000));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', UNIX(1100));
+INSERT INTO cluster_services (id, type) VALUES (1, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (2, 'shared');
 
 -- two domains
 INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
21 changes: 11 additions & 10 deletions internal/api/fixtures/start-data.sql
@@ -1,17 +1,18 @@
 CREATE OR REPLACE FUNCTION unix(i integer) RETURNS timestamp AS $$ SELECT TO_TIMESTAMP(i) AT TIME ZONE 'Etc/UTC' $$ LANGUAGE SQL;
 
--- one bogus capacitor
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('dummy-capacitor', UNIX(900), UNIX(1800));
+-- two capacitors matching the two services that have capacity values
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('scans-unshared', UNIX(1000), UNIX(2000));
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('scans-shared', UNIX(1100), UNIX(2100));
 
 -- three services
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'unshared', UNIX(1000));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', UNIX(1100));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'centralized', UNIX(1200));
+INSERT INTO cluster_services (id, type) VALUES (1, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (2, 'shared');
+INSERT INTO cluster_services (id, type) VALUES (3, 'centralized');
 
 -- all services have the resources "things" and "capacity"
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (1, 'things', 139, '[{"smaller_half":46},{"larger_half":93}]', '[{"name":"az-one","capacity":69,"usage":13},{"name":"az-two","capacity":69,"usage":13}]', 'dummy-capacitor');
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'things', 246, '[{"smaller_half":82},{"larger_half":164}]', '', 'dummy-capacitor');
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'capacity', 185, '', '', 'dummy-capacitor');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (1, 'things', 139, '[{"smaller_half":46},{"larger_half":93}]', '[{"name":"az-one","capacity":69,"usage":13},{"name":"az-two","capacity":69,"usage":13}]', 'scans-unshared');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'things', 246, '[{"smaller_half":82},{"larger_half":164}]', '', 'scans-shared');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'capacity', 185, '', '', 'scans-shared');
 
 -- two domains
 INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
@@ -104,8 +105,8 @@ INSERT INTO project_rates (service_id, name, rate_limit, window_ns, usage_as_big
 
 -- insert some bullshit data that should be filtered out by the internal/reports/ logic
 -- (cluster "north", service "weird", resource "items" and rate "frobnicate" are not configured)
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (101, 'weird', UNIX(1100));
-INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (101, 'things', 1, 'dummy-capacitor');
+INSERT INTO cluster_services (id, type) VALUES (101, 'weird');
+INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (101, 'things', 1, 'scans-shared');
 INSERT INTO domain_services (id, domain_id, type) VALUES (101, 1, 'weird');
 INSERT INTO domain_resources (service_id, name, quota) VALUES (101, 'things', 1);
 INSERT INTO project_services (id, project_id, type) VALUES (101, 1, 'weird');
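
These fixture edits are the database-side counterpart of the JSON fixture changes above: with cluster_services.scraped_at gone, the cluster report derives its timestamps from the capacitors, so scans-unshared scraped at UNIX(1000) and scans-shared at UNIX(1100) are exactly what produces "min_scraped_at": 1000 and "max_scraped_at": 1100 in cluster-get-west.json. A sketch of that derivation in the repository's own SQL-in-Go style (illustrative only — the real code joins via clusterReportQuery3 further down and merges min/max in Go, not in SQL):

// Illustrative sketch, not code from this PR: deriving the cluster-level
// scrape timestamps once they live on cluster_capacitors. Table and column
// names match the fixtures above.
const scrapeTimestampSketch = `
	SELECT MIN(cc.scraped_at) AS min_scraped_at,
	       MAX(cc.scraped_at) AS max_scraped_at
	  FROM cluster_resources cr
	  JOIN cluster_capacitors cc ON cc.capacitor_id = cr.capacitor_id
`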
19 changes: 5 additions & 14 deletions internal/collector/capacity.go
@@ -145,7 +145,7 @@ func (c *Collector) scanCapacity() {
 	if err != nil {
 		c.LogError("write capacity failed: %s", err.Error())
 	}
-	err = c.writeCapacity(tx, values, scrapedAt)
+	err = c.writeCapacity(tx, values)
 	if err != nil {
 		c.LogError("write capacity failed: %s", err.Error())
 	}
@@ -192,7 +192,7 @@ func (c *Collector) writeCapacitorInfo(tx *gorp.Transaction, capacitorInfo map[s
 	return nil
 }
 
-func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[string]capacityDataWithCapacitorID, scrapedAt time.Time) error {
+func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[string]capacityDataWithCapacitorID) error {
 	//create missing cluster_services entries (superfluous ones will be cleaned
 	//up by the CheckConsistency())
 	serviceIDForType := make(map[string]int64)
@@ -217,23 +217,14 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 			continue
 		}
 
-		dbService := &db.ClusterService{
-			Type:      serviceType,
-			ScrapedAt: &scrapedAt,
-		}
+		dbService := &db.ClusterService{Type: serviceType}
 		err := tx.Insert(dbService)
 		if err != nil {
 			return err
 		}
 		serviceIDForType[dbService.Type] = dbService.ID
 	}
 
-	//update scraped_at timestamp on all cluster services in one step
-	_, err = tx.Exec(`UPDATE cluster_services SET scraped_at = $1`, scrapedAt)
-	if err != nil {
-		return err
-	}
-
 	//enumerate cluster_resources: create missing ones, update existing ones, delete superfluous ones
 	for _, serviceType := range allServiceTypes {
 		serviceValues := values[serviceType]
@@ -251,7 +242,7 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 		data, exists := serviceValues[dbResource.Name]
 		if exists {
 			dbResource.RawCapacity = data.CapacityData.Capacity
-			dbResource.CapacitorID = &data.CapacitorID
+			dbResource.CapacitorID = data.CapacitorID
 
 			if len(data.CapacityData.Subcapacities) == 0 {
 				dbResource.SubcapacitiesJSON = ""
@@ -301,7 +292,7 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 				RawCapacity:       data.CapacityData.Capacity,
 				CapacityPerAZJSON: "", //but see below
 				SubcapacitiesJSON: "",
-				CapacitorID:       &data.CapacitorID,
+				CapacitorID:       data.CapacitorID,
 			}
 
 			if len(data.CapacityData.Subcapacities) != 0 {
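
Note that writeCapacity now only needs to know which capacitor produced each value; the timestamps themselves are persisted per capacitor by writeCapacitorInfo, which is why the scrapedAt parameter and the bulk UPDATE could both go. The capacityDataWithCapacitorID type is referenced but never defined in this diff — a minimal sketch of the shape implied by the call sites above (the field set and the core.CapacityData type name are assumptions):

// Sketch only: reconstructed from data.CapacityData.* and data.CapacitorID in
// the hunks above; the real definition lives elsewhere in internal/collector.
type capacityDataWithCapacitorID struct {
	CapacityData core.CapacityData // capacity value, subcapacities, per-AZ breakdown
	CapacitorID  string            // plain string now that cluster_resources.capacitor_id is mandatory
}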
23 changes: 4 additions & 19 deletions internal/collector/capacity_test.go
@@ -95,17 +95,16 @@ func Test_ScanCapacity(t *testing.T) {
 INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 0, 1, 900);
 INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (1, 'things', 42, 'unittest');
 INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (2, 'capacity', 42, 'unittest2');
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'shared', 0);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'unshared', 0);
+INSERT INTO cluster_services (id, type) VALUES (1, 'shared');
+INSERT INTO cluster_services (id, type) VALUES (2, 'unshared');
 	`)
 
 	//insert some crap records
-	capacitorID := "unittest2"
 	err := s.DB.Insert(&db.ClusterResource{
 		ServiceID:   2,
 		Name:        "unknown",
 		RawCapacity: 100,
-		CapacitorID: &capacitorID,
+		CapacitorID: "unittest2",
 	})
 	if err != nil {
 		t.Error(err)
@@ -126,8 +125,6 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest';
 	UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest2';
 	UPDATE cluster_resources SET capacity = 23 WHERE service_id = 1 AND name = 'things';
-	UPDATE cluster_services SET scraped_at = 5 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 5 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//add a capacity plugin that reports subcapacities; check that subcapacities
@@ -147,8 +144,6 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 10, next_scrape_at = 910 WHERE capacitor_id = 'unittest2';
 	INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', 10, 1, '{"smaller_half":14,"larger_half":28}', 910);
 	INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacitor_id) VALUES (2, 'things', 42, '[{"smaller_half":14},{"larger_half":28}]', 'unittest4');
-	UPDATE cluster_services SET scraped_at = 10 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 10 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//check that scraping correctly updates subcapacities on an existing record
@@ -159,8 +154,6 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 17, next_scrape_at = 917 WHERE capacitor_id = 'unittest2';
 	UPDATE cluster_capacitors SET scraped_at = 17, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = 917 WHERE capacitor_id = 'unittest4';
 	UPDATE cluster_resources SET capacity = 10, subcapacities = '[{"smaller_half":3},{"larger_half":7}]' WHERE service_id = 2 AND name = 'things';
-	UPDATE cluster_services SET scraped_at = 17 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 17 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//add a capacity plugin that also reports capacity per availability zone; check that
@@ -181,9 +174,7 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 24, next_scrape_at = 924 WHERE capacitor_id = 'unittest4';
 	INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', 24, 1, 924);
 	INSERT INTO cluster_resources (service_id, name, capacity, capacity_per_az, capacitor_id) VALUES (3, 'things', 42, '[{"name":"az-one","capacity":21,"usage":4},{"name":"az-two","capacity":21,"usage":4}]', 'unittest5');
-	UPDATE cluster_services SET scraped_at = 24 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 24 WHERE id = 2 AND type = 'unshared';
-	INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared2', 24);
+	INSERT INTO cluster_services (id, type) VALUES (3, 'unshared2');
 	`)
 
 	//check that scraping correctly updates the capacities on an existing record
@@ -195,9 +186,6 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 33, next_scrape_at = 933 WHERE capacitor_id = 'unittest4';
 	UPDATE cluster_capacitors SET scraped_at = 33, next_scrape_at = 933 WHERE capacitor_id = 'unittest5';
 	UPDATE cluster_resources SET capacity = 30, capacity_per_az = '[{"name":"az-one","capacity":15,"usage":3},{"name":"az-two","capacity":15,"usage":3}]' WHERE service_id = 3 AND name = 'things';
-	UPDATE cluster_services SET scraped_at = 33 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 33 WHERE id = 2 AND type = 'unshared';
-	UPDATE cluster_services SET scraped_at = 33 WHERE id = 3 AND type = 'unshared2';
 	`)
 
 	//check data metrics generated for these capacity data
@@ -222,8 +210,5 @@ func Test_ScanCapacity(t *testing.T) {
 	UPDATE cluster_capacitors SET scraped_at = 42, next_scrape_at = 942 WHERE capacitor_id = 'unittest4';
 	DELETE FROM cluster_capacitors WHERE capacitor_id = 'unittest5';
 	DELETE FROM cluster_resources WHERE service_id = 3 AND name = 'things';
-	UPDATE cluster_services SET scraped_at = 42 WHERE id = 1 AND type = 'shared';
-	UPDATE cluster_services SET scraped_at = 42 WHERE id = 2 AND type = 'unshared';
-	UPDATE cluster_services SET scraped_at = 42 WHERE id = 3 AND type = 'unshared2';
 	`)
 }
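
Only the SQL expectation strings appear in these hunks, but the pattern is a delta assertion: after each scan, the test asserts exactly which rows changed since the previous checkpoint, so dropping the per-service timestamp writes shrinks every expected delta. A sketch of how one such assertion presumably reads (the exact harness call is an assumption — only the backtick SQL is visible in this diff):

// Hypothetical harness call; the SQL delta is taken from the hunks above.
tr.DBChanges().AssertEqualf(`
	UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest';
	UPDATE cluster_resources SET capacity = 23 WHERE service_id = 1 AND name = 'things';
`)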
6 changes: 1 addition & 5 deletions internal/collector/consistency.go
@@ -71,18 +71,14 @@ func (c *Collector) checkConsistencyCluster(_ context.Context, _ prometheus.Labe
 		}
 	}
 
-	now := c.TimeNow()
 	//create missing service entries
 	for _, serviceType := range c.Cluster.ServiceTypesInAlphabeticalOrder() {
 		if seen[serviceType] {
 			continue
 		}
 
 		logg.Info("creating missing %s cluster service entry", serviceType)
-		err := c.DB.Insert(&db.ClusterService{
-			Type:      serviceType,
-			ScrapedAt: &now,
-		})
+		err := c.DB.Insert(&db.ClusterService{Type: serviceType})
 		if err != nil {
 			c.LogError(err.Error())
 		}
6 changes: 1 addition & 5 deletions internal/collector/consistency_test.go
@@ -101,11 +101,7 @@ func Test_Consistency(t *testing.T) {
 		t.Error(err)
 	}
 	//add some useless *_services entries
-	epoch := time.Unix(0, 0).UTC()
-	err = s.DB.Insert(&db.ClusterService{
-		Type:      "whatever",
-		ScrapedAt: &epoch,
-	})
+	err = s.DB.Insert(&db.ClusterService{Type: "whatever"})
 	if err != nil {
 		t.Error(err)
 	}
6 changes: 3 additions & 3 deletions internal/collector/fixtures/checkconsistency0.sql
@@ -1,6 +1,6 @@
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3);
+INSERT INTO cluster_services (id, type) VALUES (1, 'centralized');
+INSERT INTO cluster_services (id, type) VALUES (2, 'shared');
+INSERT INTO cluster_services (id, type) VALUES (3, 'unshared');
 
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0);
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0);
6 changes: 3 additions & 3 deletions internal/collector/fixtures/checkconsistency1.sql
@@ -1,6 +1,6 @@
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (4, 'whatever', 0);
+INSERT INTO cluster_services (id, type) VALUES (1, 'centralized');
+INSERT INTO cluster_services (id, type) VALUES (3, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (4, 'whatever');
 
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0);
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0);
12 changes: 6 additions & 6 deletions internal/collector/fixtures/checkconsistency2.sql
@@ -1,6 +1,6 @@
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3);
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (5, 'shared', 6);
+INSERT INTO cluster_services (id, type) VALUES (1, 'centralized');
+INSERT INTO cluster_services (id, type) VALUES (3, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (5, 'shared');
 
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0);
 INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0);
@@ -32,9 +32,9 @@ INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
 INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
 
 INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (11, 1, 'shared', 7, 7);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (12, 2, 'shared', 7, 7);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (13, 3, 'shared', 8, 8);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (11, 1, 'shared', 5, 5);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (12, 2, 'shared', 5, 5);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (13, 3, 'shared', 6, 6);
 INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
 INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
 INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
16 changes: 16 additions & 0 deletions internal/db/migrations.go
@@ -161,4 +161,20 @@ var sqlMigrations = map[string]string{
 	ALTER TABLE cluster_resources
 		DROP COLUMN capacitor_id;
 	`,
+	"024_move_capacity_scrape_timestamps.up.sql": `
+		ALTER TABLE cluster_capacitors
+			ALTER COLUMN scraped_at SET DEFAULT NULL; -- null if scraping did not happen yet
+		ALTER TABLE cluster_services
+			DROP COLUMN scraped_at;
+		ALTER TABLE cluster_resources
+			ALTER COLUMN capacitor_id DROP DEFAULT;
+	`,
+	"024_move_capacity_scrape_timestamps.down.sql": `
+		ALTER TABLE cluster_capacitors
+			ALTER COLUMN scraped_at DROP DEFAULT;
+		ALTER TABLE cluster_services
+			ADD COLUMN scraped_at TIMESTAMP NOT NULL DEFAULT NOW();
+		ALTER TABLE cluster_resources
+			ALTER COLUMN capacitor_id SET DEFAULT NULL;
+	`,
 }
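
One operational note: the down migration re-adds cluster_services.scraped_at as NOT NULL DEFAULT NOW(), so a rollback cannot restore the original per-service timestamps — every existing row is stamped with the rollback time instead. For reference, this is the shape the affected table ends up with after 024 (a reconstruction from the ALTERs above, written in the same SQL-in-Go style as sqlMigrations; constraints are assumptions, not DDL copied from the repository):

// Sketch only: resulting cluster_services shape after migration 024.
const clusterServicesAfter024 = `
	CREATE TABLE cluster_services (
		id   BIGSERIAL NOT NULL PRIMARY KEY,
		type TEXT      NOT NULL
	);
`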
17 changes: 8 additions & 9 deletions internal/db/models.go
@@ -37,19 +37,18 @@ type ClusterCapacitor struct {
 
 // ClusterService contains a record from the `cluster_services` table.
 type ClusterService struct {
-	ID        int64      `db:"id"`
-	Type      string     `db:"type"`
-	ScrapedAt *time.Time `db:"scraped_at"` //pointer type to allow for NULL value
+	ID   int64  `db:"id"`
+	Type string `db:"type"`
 }
 
 // ClusterResource contains a record from the `cluster_resources` table.
 type ClusterResource struct {
-	ServiceID         int64   `db:"service_id"`
-	Name              string  `db:"name"`
-	RawCapacity       uint64  `db:"capacity"`
-	CapacityPerAZJSON string  `db:"capacity_per_az"`
-	SubcapacitiesJSON string  `db:"subcapacities"`
-	CapacitorID       *string `db:"capacitor_id"` //can be NULL during transition period (TODO: remove after migration 24)
+	ServiceID         int64  `db:"service_id"`
+	Name              string `db:"name"`
+	RawCapacity       uint64 `db:"capacity"`
+	CapacityPerAZJSON string `db:"capacity_per_az"`
+	SubcapacitiesJSON string `db:"subcapacities"`
+	CapacitorID       string `db:"capacitor_id"`
 }
 
 // Domain contains a record from the `domains` table.
9 changes: 5 additions & 4 deletions internal/reports/cluster.go
@@ -56,9 +56,10 @@ var clusterReportQuery2 = sqlext.SimplifyWhitespace(`
 
 var clusterReportQuery3 = sqlext.SimplifyWhitespace(`
 	SELECT cs.type, cr.name, cr.capacity,
-	       cr.capacity_per_az, cr.subcapacities, cs.scraped_at
+	       cr.capacity_per_az, cr.subcapacities, cc.scraped_at
 	  FROM cluster_services cs
 	  LEFT OUTER JOIN cluster_resources cr ON cr.service_id = cs.id {{AND cr.name = $resource_name}}
+	  LEFT OUTER JOIN cluster_capacitors cc ON cc.capacitor_id = cr.capacitor_id
 	 WHERE TRUE {{AND cs.type = $service_type}}
 `)
 
@@ -162,7 +163,7 @@ func GetClusterResources(cluster *core.Cluster, dbi db.Interface, filter Filter)
 		rawCapacity   *uint64
 		capacityPerAZ *string
 		subcapacities *string
-		scrapedAt     time.Time
+		scrapedAt     *time.Time
 	)
 	err := rows.Scan(&serviceType, &resourceName, &rawCapacity,
 		&capacityPerAZ, &subcapacities, &scrapedAt)
@@ -193,8 +194,8 @@ func GetClusterResources(cluster *core.Cluster, dbi db.Interface, filter Filter)
 			}
 		}
 
-		report.MaxScrapedAt = mergeMaxTime(report.MaxScrapedAt, &scrapedAt)
-		report.MinScrapedAt = mergeMinTime(report.MinScrapedAt, &scrapedAt)
+		report.MaxScrapedAt = mergeMaxTime(report.MaxScrapedAt, scrapedAt)
+		report.MinScrapedAt = mergeMinTime(report.MinScrapedAt, scrapedAt)
 
 		return nil
 	})
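
Because cc.scraped_at now arrives through a LEFT OUTER JOIN (and can be NULL for a capacitor that has not scraped yet, per migration 024), scrapedAt is scanned into a *time.Time and handed to the merge helpers as-is instead of as &scrapedAt. The helpers themselves are not part of this diff; a nil-tolerant sketch of what they presumably do (signatures and the plain *time.Time types are assumptions — the report fields may use a different time encoding):

// Assumed implementations — the real mergeMaxTime/mergeMinTime live elsewhere
// in internal/reports. The point is the nil handling: a NULL scraped_at must
// not clobber an existing min/max.
func mergeMaxTime(current, next *time.Time) *time.Time {
	if next == nil {
		return current
	}
	if current == nil || next.After(*current) {
		return next
	}
	return current
}

func mergeMinTime(current, next *time.Time) *time.Time {
	if next == nil {
		return current
	}
	if current == nil || next.Before(*current) {
		return next
	}
	return current
}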
