diff --git a/internal/api/fixtures/cluster-get-west-no-resources.json b/internal/api/fixtures/cluster-get-west-no-resources.json
index ff9d7f117..5dd3da9f8 100644
--- a/internal/api/fixtures/cluster-get-west-no-resources.json
+++ b/internal/api/fixtures/cluster-get-west-no-resources.json
@@ -9,8 +9,6 @@
         "max_scraped_at": 66,
         "min_scraped_at": 22
       }
-    ],
-    "max_scraped_at": 1100,
-    "min_scraped_at": 1100
+    ]
   }
 }
diff --git a/internal/api/fixtures/cluster-get-west-with-overcommit.json b/internal/api/fixtures/cluster-get-west-with-overcommit.json
index 350897382..c01ede974 100644
--- a/internal/api/fixtures/cluster-get-west-with-overcommit.json
+++ b/internal/api/fixtures/cluster-get-west-with-overcommit.json
@@ -105,7 +105,7 @@
         "min_scraped_at": 11
       }
     ],
-    "max_scraped_at": 1200,
+    "max_scraped_at": 1100,
     "min_scraped_at": 1000
   }
 }
diff --git a/internal/api/fixtures/cluster-get-west.json b/internal/api/fixtures/cluster-get-west.json
index d0752f24c..b2e634acd 100644
--- a/internal/api/fixtures/cluster-get-west.json
+++ b/internal/api/fixtures/cluster-get-west.json
@@ -101,7 +101,7 @@
         "min_scraped_at": 11
       }
     ],
-    "max_scraped_at": 1200,
+    "max_scraped_at": 1100,
     "min_scraped_at": 1000
   }
 }
diff --git a/internal/api/fixtures/start-data-minimal.sql b/internal/api/fixtures/start-data-minimal.sql
index a419889cc..7da0101b7 100644
--- a/internal/api/fixtures/start-data-minimal.sql
+++ b/internal/api/fixtures/start-data-minimal.sql
@@ -1,8 +1,8 @@
 CREATE OR REPLACE FUNCTION unix(i integer) RETURNS timestamp AS $$ SELECT TO_TIMESTAMP(i) AT TIME ZONE 'Etc/UTC' $$ LANGUAGE SQL;
 
 -- two services, one shared, one unshared
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'unshared', UNIX(1000));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', UNIX(1100));
+INSERT INTO cluster_services (id, type) VALUES (1, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (2, 'shared');
 
 -- two domains
 INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
diff --git a/internal/api/fixtures/start-data.sql b/internal/api/fixtures/start-data.sql
index 1f01a595e..d515b8957 100644
--- a/internal/api/fixtures/start-data.sql
+++ b/internal/api/fixtures/start-data.sql
@@ -1,17 +1,18 @@
 CREATE OR REPLACE FUNCTION unix(i integer) RETURNS timestamp AS $$ SELECT TO_TIMESTAMP(i) AT TIME ZONE 'Etc/UTC' $$ LANGUAGE SQL;
 
--- one bogus capacitor
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('dummy-capacitor', UNIX(900), UNIX(1800));
+-- two capacitors matching the two services that have capacity values
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('scans-unshared', UNIX(1000), UNIX(2000));
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, next_scrape_at) VALUES ('scans-shared', UNIX(1100), UNIX(2100));
 
 -- three services
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'unshared', UNIX(1000));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', UNIX(1100));
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'centralized', UNIX(1200));
+INSERT INTO cluster_services (id, type) VALUES (1, 'unshared');
+INSERT INTO cluster_services (id, type) VALUES (2, 'shared');
+INSERT INTO cluster_services (id, type) VALUES (3, 'centralized');
 
 -- all services have the resources "things" and "capacity"
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (1, 'things', 139, '[{"smaller_half":46},{"larger_half":93}]', '[{"name":"az-one","capacity":69,"usage":13},{"name":"az-two","capacity":69,"usage":13}]', 'dummy-capacitor');
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'things', 246, '[{"smaller_half":82},{"larger_half":164}]', '', 'dummy-capacitor');
-INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'capacity', 185, '', '', 'dummy-capacitor');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (1, 'things', 139, '[{"smaller_half":46},{"larger_half":93}]', '[{"name":"az-one","capacity":69,"usage":13},{"name":"az-two","capacity":69,"usage":13}]', 'scans-unshared');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'things', 246, '[{"smaller_half":82},{"larger_half":164}]', '', 'scans-shared');
+INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacity_per_az, capacitor_id) VALUES (2, 'capacity', 185, '', '', 'scans-shared');
 
 -- two domains
 INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
@@ -104,8 +105,8 @@ INSERT INTO project_rates (service_id, name, rate_limit, window_ns, usage_as_big
 
 -- insert some bullshit data that should be filtered out by the internal/reports/ logic
 -- (cluster "north", service "weird", resource "items" and rate "frobnicate" are not configured)
-INSERT INTO cluster_services (id, type, scraped_at) VALUES (101, 'weird', UNIX(1100));
-INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (101, 'things', 1, 'dummy-capacitor');
+INSERT INTO cluster_services (id, type) VALUES (101, 'weird');
+INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (101, 'things', 1, 'scans-shared');
 INSERT INTO domain_services (id, domain_id, type) VALUES (101, 1, 'weird');
 INSERT INTO domain_resources (service_id, name, quota) VALUES (101, 'things', 1);
 INSERT INTO project_services (id, project_id, type) VALUES (101, 1, 'weird');
diff --git a/internal/collector/capacity.go b/internal/collector/capacity.go
index 149e870d5..83a652aed 100644
--- a/internal/collector/capacity.go
+++ b/internal/collector/capacity.go
@@ -145,7 +145,7 @@ func (c *Collector) scanCapacity() {
 	if err != nil {
 		c.LogError("write capacity failed: %s", err.Error())
 	}
-	err = c.writeCapacity(tx, values, scrapedAt)
+	err = c.writeCapacity(tx, values)
 	if err != nil {
 		c.LogError("write capacity failed: %s", err.Error())
 	}
@@ -192,7 +192,7 @@ func (c *Collector) writeCapacitorInfo(tx *gorp.Transaction, capacitorInfo map[s
 	return nil
 }
 
-func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[string]capacityDataWithCapacitorID, scrapedAt time.Time) error {
+func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[string]capacityDataWithCapacitorID) error {
 	//create missing cluster_services entries (superfluous ones will be cleaned
 	//up by the CheckConsistency())
 	serviceIDForType := make(map[string]int64)
@@ -217,10 +217,7 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 			continue
 		}
 
-		dbService := &db.ClusterService{
-			Type:      serviceType,
-			ScrapedAt: &scrapedAt,
-		}
+		dbService := &db.ClusterService{Type: serviceType}
 		err := tx.Insert(dbService)
 		if err != nil {
 			return err
@@ -228,12 +225,6 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 		serviceIDForType[dbService.Type] = dbService.ID
 	}
 
-	//update scraped_at timestamp on all cluster services in one step
-	_, err = tx.Exec(`UPDATE cluster_services SET scraped_at = $1`, scrapedAt)
-	if err != nil {
-		return err
-	}
-
 	//enumerate cluster_resources: create missing ones, update existing ones, delete superfluous ones
 	for _, serviceType := range allServiceTypes {
 		serviceValues := values[serviceType]
@@ -251,7 +242,7 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 		data, exists := serviceValues[dbResource.Name]
 		if exists {
 			dbResource.RawCapacity = data.CapacityData.Capacity
-			dbResource.CapacitorID = &data.CapacitorID
+			dbResource.CapacitorID = data.CapacitorID
 
 			if len(data.CapacityData.Subcapacities) == 0 {
 				dbResource.SubcapacitiesJSON = ""
@@ -301,7 +292,7 @@ func (c *Collector) writeCapacity(tx *gorp.Transaction, values map[string]map[st
 				RawCapacity:       data.CapacityData.Capacity,
 				CapacityPerAZJSON: "", //but see below
 				SubcapacitiesJSON: "",
-				CapacitorID:       &data.CapacitorID,
+				CapacitorID:       data.CapacitorID,
 			}
 
 			if len(data.CapacityData.Subcapacities) != 0 {
diff --git a/internal/collector/capacity_test.go b/internal/collector/capacity_test.go
index 1d97d3d1c..d5bc9e3f0 100644
--- a/internal/collector/capacity_test.go
+++ b/internal/collector/capacity_test.go
@@ -95,17 +95,16 @@ func Test_ScanCapacity(t *testing.T) {
 		INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 0, 1, 900);
 		INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (1, 'things', 42, 'unittest');
 		INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (2, 'capacity', 42, 'unittest2');
-		INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'shared', 0);
-		INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'unshared', 0);
+		INSERT INTO cluster_services (id, type) VALUES (1, 'shared');
+		INSERT INTO cluster_services (id, type) VALUES (2, 'unshared');
 	`)
 
 	//insert some crap records
-	capacitorID := "unittest2"
 	err := s.DB.Insert(&db.ClusterResource{
 		ServiceID:   2,
 		Name:        "unknown",
 		RawCapacity: 100,
-		CapacitorID: &capacitorID,
+		CapacitorID: "unittest2",
 	})
 	if err != nil {
 		t.Error(err)
@@ -126,8 +125,6 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest';
 		UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest2';
 		UPDATE cluster_resources SET capacity = 23 WHERE service_id = 1 AND name = 'things';
-		UPDATE cluster_services SET scraped_at = 5 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 5 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//add a capacity plugin that reports subcapacities; check that subcapacities
@@ -147,8 +144,6 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 10, next_scrape_at = 910 WHERE capacitor_id = 'unittest2';
 		INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', 10, 1, '{"smaller_half":14,"larger_half":28}', 910);
 		INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacitor_id) VALUES (2, 'things', 42, '[{"smaller_half":14},{"larger_half":28}]', 'unittest4');
-		UPDATE cluster_services SET scraped_at = 10 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 10 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//check that scraping correctly updates subcapacities on an existing record
@@ -159,8 +154,6 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 17, next_scrape_at = 917 WHERE capacitor_id = 'unittest2';
 		UPDATE cluster_capacitors SET scraped_at = 17, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = 917 WHERE capacitor_id = 'unittest4';
 		UPDATE cluster_resources SET capacity = 10, subcapacities = '[{"smaller_half":3},{"larger_half":7}]' WHERE service_id = 2 AND name = 'things';
-		UPDATE cluster_services SET scraped_at = 17 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 17 WHERE id = 2 AND type = 'unshared';
 	`)
 
 	//add a capacity plugin that also reports capacity per availability zone; check that
@@ -181,9 +174,7 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 24, next_scrape_at = 924 WHERE capacitor_id = 'unittest4';
 		INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', 24, 1, 924);
 		INSERT INTO cluster_resources (service_id, name, capacity, capacity_per_az, capacitor_id) VALUES (3, 'things', 42, '[{"name":"az-one","capacity":21,"usage":4},{"name":"az-two","capacity":21,"usage":4}]', 'unittest5');
-		UPDATE cluster_services SET scraped_at = 24 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 24 WHERE id = 2 AND type = 'unshared';
-		INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared2', 24);
+		INSERT INTO cluster_services (id, type) VALUES (3, 'unshared2');
 	`)
 
 	//check that scraping correctly updates the capacities on an existing record
@@ -195,9 +186,6 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 33, next_scrape_at = 933 WHERE capacitor_id = 'unittest4';
 		UPDATE cluster_capacitors SET scraped_at = 33, next_scrape_at = 933 WHERE capacitor_id = 'unittest5';
 		UPDATE cluster_resources SET capacity = 30, capacity_per_az = '[{"name":"az-one","capacity":15,"usage":3},{"name":"az-two","capacity":15,"usage":3}]' WHERE service_id = 3 AND name = 'things';
-		UPDATE cluster_services SET scraped_at = 33 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 33 WHERE id = 2 AND type = 'unshared';
-		UPDATE cluster_services SET scraped_at = 33 WHERE id = 3 AND type = 'unshared2';
 	`)
 
 	//check data metrics generated for these capacity data
@@ -222,8 +210,5 @@ func Test_ScanCapacity(t *testing.T) {
 		UPDATE cluster_capacitors SET scraped_at = 42, next_scrape_at = 942 WHERE capacitor_id = 'unittest4';
 		DELETE FROM cluster_capacitors WHERE capacitor_id = 'unittest5';
 		DELETE FROM cluster_resources WHERE service_id = 3 AND name = 'things';
-		UPDATE cluster_services SET scraped_at = 42 WHERE id = 1 AND type = 'shared';
-		UPDATE cluster_services SET scraped_at = 42 WHERE id = 2 AND type = 'unshared';
-		UPDATE cluster_services SET scraped_at = 42 WHERE id = 3 AND type = 'unshared2';
 	`)
 }
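The subcapacities strings asserted above, such as '[{"smaller_half":14},{"larger_half":28}]', are plain JSON marshalling of whatever subcapacity list the plugin reports. A sketch of that encoding step, assuming a plugin that reports the two halves from the test (the types here are illustrative, not the repository's):

    package main

    import (
    	"encoding/json"
    	"fmt"
    )

    func main() {
    	// a plugin-reported subcapacity list; the field names follow the test fixture
    	subcapacities := []map[string]uint64{
    		{"smaller_half": 14},
    		{"larger_half": 28},
    	}
    	buf, err := json.Marshal(subcapacities)
    	if err != nil {
    		panic(err)
    	}
    	fmt.Println(string(buf)) // [{"smaller_half":14},{"larger_half":28}]
    }
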
logg.Info("creating missing %s cluster service entry", serviceType) - err := c.DB.Insert(&db.ClusterService{ - Type: serviceType, - ScrapedAt: &now, - }) + err := c.DB.Insert(&db.ClusterService{Type: serviceType}) if err != nil { c.LogError(err.Error()) } diff --git a/internal/collector/consistency_test.go b/internal/collector/consistency_test.go index 50bef8a5d..31808e8ef 100644 --- a/internal/collector/consistency_test.go +++ b/internal/collector/consistency_test.go @@ -101,11 +101,7 @@ func Test_Consistency(t *testing.T) { t.Error(err) } //add some useless *_services entries - epoch := time.Unix(0, 0).UTC() - err = s.DB.Insert(&db.ClusterService{ - Type: "whatever", - ScrapedAt: &epoch, - }) + err = s.DB.Insert(&db.ClusterService{Type: "whatever"}) if err != nil { t.Error(err) } diff --git a/internal/collector/fixtures/checkconsistency0.sql b/internal/collector/fixtures/checkconsistency0.sql index 1f44b6b4c..5a03548a2 100644 --- a/internal/collector/fixtures/checkconsistency0.sql +++ b/internal/collector/fixtures/checkconsistency0.sql @@ -1,6 +1,6 @@ -INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (2, 'shared', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3); +INSERT INTO cluster_services (id, type) VALUES (1, 'centralized'); +INSERT INTO cluster_services (id, type) VALUES (2, 'shared'); +INSERT INTO cluster_services (id, type) VALUES (3, 'unshared'); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0); diff --git a/internal/collector/fixtures/checkconsistency1.sql b/internal/collector/fixtures/checkconsistency1.sql index 1ddf47136..368f9c1cb 100644 --- a/internal/collector/fixtures/checkconsistency1.sql +++ b/internal/collector/fixtures/checkconsistency1.sql @@ -1,6 +1,6 @@ -INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (4, 'whatever', 0); +INSERT INTO cluster_services (id, type) VALUES (1, 'centralized'); +INSERT INTO cluster_services (id, type) VALUES (3, 'unshared'); +INSERT INTO cluster_services (id, type) VALUES (4, 'whatever'); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0); diff --git a/internal/collector/fixtures/checkconsistency2.sql b/internal/collector/fixtures/checkconsistency2.sql index 27beb18c6..9e91d8ad3 100644 --- a/internal/collector/fixtures/checkconsistency2.sql +++ b/internal/collector/fixtures/checkconsistency2.sql @@ -1,6 +1,6 @@ -INSERT INTO cluster_services (id, type, scraped_at) VALUES (1, 'centralized', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (3, 'unshared', 3); -INSERT INTO cluster_services (id, type, scraped_at) VALUES (5, 'shared', 6); +INSERT INTO cluster_services (id, type) VALUES (1, 'centralized'); +INSERT INTO cluster_services (id, type) VALUES (3, 'unshared'); +INSERT INTO cluster_services (id, type) VALUES (5, 'shared'); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity', 0); INSERT INTO domain_resources (service_id, name, quota) VALUES (1, 'capacity_portion', 0); @@ -32,9 +32,9 @@ INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany'); 
diff --git a/internal/db/migrations.go b/internal/db/migrations.go
index 094e8a693..3f27b3e0e 100644
--- a/internal/db/migrations.go
+++ b/internal/db/migrations.go
@@ -161,4 +161,20 @@ var sqlMigrations = map[string]string{
 		ALTER TABLE cluster_resources
 			DROP COLUMN capacitor_id;
 	`,
+	"024_move_capacity_scrape_timestamps.up.sql": `
+		ALTER TABLE cluster_capacitors
+			ALTER COLUMN scraped_at DROP NOT NULL; -- null if scraping did not happen yet
+		ALTER TABLE cluster_services
+			DROP COLUMN scraped_at;
+		ALTER TABLE cluster_resources
+			ALTER COLUMN capacitor_id SET NOT NULL;
+	`,
+	"024_move_capacity_scrape_timestamps.down.sql": `
+		ALTER TABLE cluster_capacitors
+			ALTER COLUMN scraped_at SET NOT NULL;
+		ALTER TABLE cluster_services
+			ADD COLUMN scraped_at TIMESTAMP DEFAULT NULL;
+		ALTER TABLE cluster_resources
+			ALTER COLUMN capacitor_id DROP NOT NULL;
+	`,
 }
diff --git a/internal/db/models.go b/internal/db/models.go
index df3f5b3b8..188e97b89 100644
--- a/internal/db/models.go
+++ b/internal/db/models.go
@@ -37,19 +37,18 @@ type ClusterCapacitor struct {
 
 // ClusterService contains a record from the `cluster_services` table.
 type ClusterService struct {
-	ID        int64      `db:"id"`
-	Type      string     `db:"type"`
-	ScrapedAt *time.Time `db:"scraped_at"` //pointer type to allow for NULL value
+	ID   int64  `db:"id"`
+	Type string `db:"type"`
 }
 
 // ClusterResource contains a record from the `cluster_resources` table.
 type ClusterResource struct {
-	ServiceID         int64   `db:"service_id"`
-	Name              string  `db:"name"`
-	RawCapacity       uint64  `db:"capacity"`
-	CapacityPerAZJSON string  `db:"capacity_per_az"`
-	SubcapacitiesJSON string  `db:"subcapacities"`
-	CapacitorID       *string `db:"capacitor_id"` //can be NULL during transition period (TODO: remove after migration 24)
+	ServiceID         int64  `db:"service_id"`
+	Name              string `db:"name"`
+	RawCapacity       uint64 `db:"capacity"`
+	CapacityPerAZJSON string `db:"capacity_per_az"`
+	SubcapacitiesJSON string `db:"subcapacities"`
+	CapacitorID       string `db:"capacitor_id"`
 }
 
 // Domain contains a record from the `domains` table.
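Migration 024 and the models.go change follow the mapping convention used throughout this package: a NOT NULL column binds to a value type (CapacitorID is now a plain string), while a nullable column binds to a pointer, with SQL NULL becoming nil. A sketch of how the now-nullable capacitor scraped_at behaves under that convention (the struct mirrors the convention, not the repository's actual ClusterCapacitor definition):

    package main

    import (
    	"fmt"
    	"time"
    )

    // clusterCapacitor is illustrative only: NOT NULL columns use value
    // types, nullable columns use pointer types.
    type clusterCapacitor struct {
    	CapacitorID string     // NOT NULL, like ClusterResource.CapacitorID above
    	ScrapedAt   *time.Time // nullable after migration 024: nil until the first scrape
    }

    func main() {
    	fresh := clusterCapacitor{CapacitorID: "scans-shared"}
    	if fresh.ScrapedAt == nil {
    		fmt.Println("not scraped yet") // what SQL NULL becomes in Go
    	}

    	now := time.Unix(1100, 0).UTC()
    	fresh.ScrapedAt = &now
    	fmt.Println("scraped at", fresh.ScrapedAt.Unix())
    }
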
diff --git a/internal/reports/cluster.go b/internal/reports/cluster.go
index efef78db7..b02094ebb 100644
--- a/internal/reports/cluster.go
+++ b/internal/reports/cluster.go
@@ -56,9 +56,10 @@ var clusterReportQuery2 = sqlext.SimplifyWhitespace(`
 
 var clusterReportQuery3 = sqlext.SimplifyWhitespace(`
 	SELECT cs.type, cr.name, cr.capacity,
-	       cr.capacity_per_az, cr.subcapacities, cs.scraped_at
+	       cr.capacity_per_az, cr.subcapacities, cc.scraped_at
 	  FROM cluster_services cs
 	  LEFT OUTER JOIN cluster_resources cr ON cr.service_id = cs.id {{AND cr.name = $resource_name}}
+	  LEFT OUTER JOIN cluster_capacitors cc ON cc.capacitor_id = cr.capacitor_id
 	 WHERE TRUE {{AND cs.type = $service_type}}
 `)
 
@@ -162,7 +163,7 @@ func GetClusterResources(cluster *core.Cluster, dbi db.Interface, filter Filter)
 			rawCapacity       *uint64
 			capacityPerAZ     *string
 			subcapacities     *string
-			scrapedAt         time.Time
+			scrapedAt         *time.Time
 		)
 		err := rows.Scan(&serviceType, &resourceName, &rawCapacity,
 			&capacityPerAZ, &subcapacities, &scrapedAt)
@@ -193,8 +194,8 @@ func GetClusterResources(cluster *core.Cluster, dbi db.Interface, filter Filter)
 			}
 		}
 
-		report.MaxScrapedAt = mergeMaxTime(report.MaxScrapedAt, &scrapedAt)
-		report.MinScrapedAt = mergeMinTime(report.MinScrapedAt, &scrapedAt)
+		report.MaxScrapedAt = mergeMaxTime(report.MaxScrapedAt, scrapedAt)
+		report.MinScrapedAt = mergeMinTime(report.MinScrapedAt, scrapedAt)
 
 		return nil
 	})
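With scrapedAt now a *time.Time coming out of a LEFT OUTER JOIN, the merge helpers must treat nil as "no data yet" so that rows whose capacitor has never scraped do not distort the reported bounds. The signatures below are inferred from the call sites above; the bodies are a guessed sketch, not the repository's implementations (the real helpers live elsewhere in internal/reports and may use the API's time wrapper type instead of *time.Time):

    package main

    import (
    	"fmt"
    	"time"
    )

    // nil-safe maximum: nil means "no value yet", so the other operand wins
    func mergeMaxTime(a, b *time.Time) *time.Time {
    	if a == nil {
    		return b
    	}
    	if b == nil || a.After(*b) {
    		return a
    	}
    	return b
    }

    // nil-safe minimum, same convention
    func mergeMinTime(a, b *time.Time) *time.Time {
    	if a == nil {
    		return b
    	}
    	if b == nil || a.Before(*b) {
    		return a
    	}
    	return b
    }

    func main() {
    	t1 := time.Unix(1000, 0).UTC()
    	t2 := time.Unix(1100, 0).UTC()
    	maxT := mergeMaxTime(mergeMaxTime(nil, &t1), &t2)
    	minT := mergeMinTime(mergeMinTime(nil, &t1), &t2)
    	fmt.Println(minT.Unix(), maxT.Unix()) // 1000 1100
    	// a row that contributes nil (capacitor never scraped) leaves the
    	// running min/max untouched instead of pulling it toward time zero
    }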