Skip to content

Commit

Permalink
collector: remove hardcoded time values
Browse files Browse the repository at this point in the history
This is a counterpart of the previous commit: This one leaves the actual
implementation behavior entirely untouched, and only changes the clock
behavior in the test code to something way more reasonable. Variable
interpolation is used to make the SQL diffs less brittle.
  • Loading branch information
majewsky committed Sep 12, 2023
1 parent 43fd2ed commit b22f516
Show file tree
Hide file tree
Showing 14 changed files with 303 additions and 160 deletions.
107 changes: 78 additions & 29 deletions internal/collector/capacity_scrape_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -98,8 +98,8 @@ func Test_ScanCapacity(t *testing.T) {
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))
tr.DBChanges().AssertEqualf(`
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest', 1, 1, 901);
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 3, 1, 903);
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest', 5, 5, 905);
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 10, 5, 910);
INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (1, 'things', 42, 'unittest');
INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (2, 'capacity', 42, 'unittest2');
`)
Expand Down Expand Up @@ -127,11 +127,17 @@ func Test_ScanCapacity(t *testing.T) {
s.Cluster.CapacityPlugins["unittest"].(*plugins.StaticCapacityPlugin).Capacity = 23
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

scrapedAt1 := s.Clock.Now().Add(-5 * time.Second)
scrapedAt2 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 7, next_scrape_at = 907 WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_resources SET capacity = 23 WHERE service_id = 1 AND name = 'things';
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
)

//add a capacity plugin that reports subcapacities; check that subcapacities
//are correctly written when creating a cluster_resources record
Expand All @@ -147,23 +153,39 @@ func Test_ScanCapacity(t *testing.T) {
setClusterCapacitorsStale(t, s)
s.Clock.StepBy(5 * time.Minute) //to force a capacitor consistency check to run
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
scrapedAt4 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 309, next_scrape_at = 1209 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 311, next_scrape_at = 1211 WHERE capacitor_id = 'unittest2';
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', 313, 1, '{"smaller_half":14,"larger_half":28}', 1213);
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', %d, 5, '{"smaller_half":14,"larger_half":28}', %d);
INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacitor_id) VALUES (2, 'things', 42, '[{"smaller_half":14},{"larger_half":28}]', 'unittest4');
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
)

//check that scraping correctly updates subcapacities on an existing record
subcapacityPlugin.Capacity = 10
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
scrapedAt4 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 315, next_scrape_at = 1215 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 317, next_scrape_at = 1217 WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = 319, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = 1219 WHERE capacitor_id = 'unittest4';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = %d, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = %d WHERE capacitor_id = 'unittest4';
UPDATE cluster_resources SET capacity = 10, subcapacities = '[{"smaller_half":3},{"larger_half":7}]' WHERE service_id = 2 AND name = 'things';
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
)

//add a capacity plugin that also reports capacity per availability zone; check that
//these capacities are correctly written when creating a cluster_resources record
Expand All @@ -179,25 +201,45 @@ func Test_ScanCapacity(t *testing.T) {
setClusterCapacitorsStale(t, s)
s.Clock.StepBy(5 * time.Minute) //to force a capacitor consistency check to run
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

scrapedAt1 = s.Clock.Now().Add(-15 * time.Second)
scrapedAt2 = s.Clock.Now().Add(-10 * time.Second)
scrapedAt4 = s.Clock.Now().Add(-5 * time.Second)
scrapedAt5 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 621, next_scrape_at = 1521 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 623, next_scrape_at = 1523 WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = 625, next_scrape_at = 1525 WHERE capacitor_id = 'unittest4';
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', 627, 1, 1527);
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', %d, 5, %d);
INSERT INTO cluster_resources (service_id, name, capacity, capacity_per_az, capacitor_id) VALUES (3, 'things', 42, '[{"name":"az-one","capacity":21,"usage":4},{"name":"az-two","capacity":21,"usage":4}]', 'unittest5');
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
scrapedAt5.Unix(), scrapedAt5.Add(15*time.Minute).Unix(),
)

//check that scraping correctly updates the capacities on an existing record
azCapacityPlugin.Capacity = 30
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

scrapedAt1 = s.Clock.Now().Add(-15 * time.Second)
scrapedAt2 = s.Clock.Now().Add(-10 * time.Second)
scrapedAt4 = s.Clock.Now().Add(-5 * time.Second)
scrapedAt5 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 629, next_scrape_at = 1529 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 631, next_scrape_at = 1531 WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = 633, next_scrape_at = 1533 WHERE capacitor_id = 'unittest4';
UPDATE cluster_capacitors SET scraped_at = 635, next_scrape_at = 1535 WHERE capacitor_id = 'unittest5';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest5';
UPDATE cluster_resources SET capacity = 30, capacity_per_az = '[{"name":"az-one","capacity":15,"usage":3},{"name":"az-two","capacity":15,"usage":3}]' WHERE service_id = 3 AND name = 'things';
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
scrapedAt5.Unix(), scrapedAt5.Add(15*time.Minute).Unix(),
)

//check data metrics generated for these capacity data
registry := prometheus.NewPedanticRegistry()
Expand All @@ -216,19 +258,26 @@ func Test_ScanCapacity(t *testing.T) {
delete(s.Cluster.CapacityPlugins, "unittest5")
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)+1)) //+1 to account for the deleted capacitor

scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
scrapedAt4 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
UPDATE cluster_capacitors SET scraped_at = 637, next_scrape_at = 1537 WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = 639, next_scrape_at = 1539 WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = 641, next_scrape_at = 1541 WHERE capacitor_id = 'unittest4';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
DELETE FROM cluster_capacitors WHERE capacitor_id = 'unittest5';
DELETE FROM cluster_resources WHERE service_id = 3 AND name = 'things';
`)
`,
scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
)
}

func setClusterCapacitorsStale(t *testing.T, s test.Setup) {
//NOTE: This is built to not use `test.TimeNow()`, because using this function shifts the time around.
//TODO: I hate this clock
t.Helper()
_, err := s.DB.Exec(`UPDATE cluster_capacitors SET next_scrape_at = (SELECT MAX(scraped_at) FROM cluster_capacitors)`)
_, err := s.DB.Exec(`UPDATE cluster_capacitors SET next_scrape_at = $1`, s.Clock.Now())
mustT(t, err)
}
2 changes: 2 additions & 0 deletions internal/collector/consistency_test.go
Original file line number Diff line number Diff line change
Expand Up @@ -44,6 +44,7 @@ func Test_Consistency(t *testing.T) {
//check that CheckConsistency() is satisfied with the
//{domain,project}_services created by ScanDomains(), but adds
//cluster_services entries
s.Clock.StepBy(time.Hour)
err = consistencyJob.ProcessOne(s.Ctx)
if err != nil {
t.Error(err)
Expand Down Expand Up @@ -131,6 +132,7 @@ func Test_Consistency(t *testing.T) {
//are added; for all project services that are created here, project
//resources are added where the quota constraint contains a Minimum value or
the quota distribution configuration contains a DefaultQuota value.
s.Clock.StepBy(time.Hour)
err = consistencyJob.ProcessOne(s.Ctx)
if err != nil {
t.Error(err)
Expand Down
12 changes: 6 additions & 6 deletions internal/collector/fixtures/checkconsistency-pre.sql
Original file line number Diff line number Diff line change
Expand Up @@ -30,12 +30,12 @@ INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (2, 1, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);
Expand Down
12 changes: 6 additions & 6 deletions internal/collector/fixtures/checkconsistency0.sql
Original file line number Diff line number Diff line change
Expand Up @@ -34,12 +34,12 @@ INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (2, 1, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);
Expand Down
8 changes: 4 additions & 4 deletions internal/collector/fixtures/checkconsistency1.sql
Original file line number Diff line number Diff line change
Expand Up @@ -27,10 +27,10 @@ INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (10, 1, 'whatever', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);
Expand Down
14 changes: 7 additions & 7 deletions internal/collector/fixtures/checkconsistency2.sql
Original file line number Diff line number Diff line change
Expand Up @@ -32,14 +32,14 @@ INSERT INTO domains (id, name, uuid) VALUES (1, 'germany', 'uuid-for-germany');
INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');

INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (11, 1, 'shared', 5, 5);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (12, 2, 'shared', 5, 5);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (13, 3, 'shared', 6, 6);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (11, 1, 'shared', 7200, 7200);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (12, 2, 'shared', 7200, 7200);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (13, 3, 'shared', 7200, 7200);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);
Expand Down
Loading

0 comments on commit b22f516

Please sign in to comment.