replace old mock clock with the standard clock from go-bits/mock #356

Merged on Sep 12, 2023 (2 commits)
4 changes: 2 additions & 2 deletions internal/collector/capacity_scrape.go
@@ -102,7 +102,7 @@ var (
)

func (c *Collector) discoverCapacityScrapeTask(_ context.Context, _ prometheus.Labels, lastConsistencyCheckAt *time.Time) (task capacityScrapeTask, err error) {
-task.Timing.StartedAt = c.TimeNow()
+task.Timing.StartedAt = c.MeasureTime()

//consistency check: every once in a while (and also immediately on startup),
//check that all required `cluster_capacitors` entries exist
@@ -185,7 +185,7 @@ func (c *Collector) processCapacityScrapeTask(_ context.Context, task capacitySc

//scrape capacity data
capacityData, serializedMetrics, err := plugin.Scrape()
-task.Timing.FinishedAt = c.TimeNow()
+task.Timing.FinishedAt = c.MeasureTimeAtEnd()
if err == nil {
capacitor.ScrapedAt = &task.Timing.FinishedAt
capacitor.ScrapeDurationSecs = task.Timing.Duration().Seconds()
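The renamed hooks split what used to be a single `TimeNow` into a start-of-task and an end-of-task measurement. A minimal sketch of the timing pattern these two calls implement (simplified types; the comment placeholder stands in for the real scrape work):

```go
package main

import (
	"fmt"
	"time"
)

// timing mirrors the role of task.Timing in the diff above (simplified).
type timing struct {
	StartedAt, FinishedAt time.Time
}

func (t timing) Duration() time.Duration { return t.FinishedAt.Sub(t.StartedAt) }

func main() {
	// In production both hooks are time.Now; in unit tests, measureTimeAtEnd
	// first advances a mock clock to simulate time passing during the task.
	measureTime := time.Now
	measureTimeAtEnd := time.Now

	var tm timing
	tm.StartedAt = measureTime()
	// ... the task body runs here (plugin.Scrape() in the real code) ...
	tm.FinishedAt = measureTimeAtEnd()
	fmt.Printf("scrape took %s\n", tm.Duration())
}
```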
133 changes: 83 additions & 50 deletions internal/collector/capacity_scrape_test.go
@@ -21,6 +21,7 @@ package collector

import (
"testing"
"time"

"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
@@ -74,15 +75,8 @@ func Test_ScanCapacity(t *testing.T) {
s := test.NewSetup(t,
test.WithConfig(testScanCapacityConfigYAML),
)
-test.ResetTime()

-c := Collector{
-Cluster: s.Cluster,
-DB: s.DB,
-LogError: t.Errorf,
-TimeNow: test.TimeNow,
-AddJitter: test.NoJitter,
-}

+c := getCollector(t, s)
job := c.CapacityScrapeJob(s.Registry)

//cluster_services must be created as a baseline (this is usually done by the CheckConsistencyJob)
@@ -104,8 +98,8 @@
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))
tr.DBChanges().AssertEqualf(`
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest', 1, 1, 901);
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 3, 1, 903);
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest', 5, 5, 905);
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest2', 10, 5, 910);
INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (1, 'things', 42, 'unittest');
INSERT INTO cluster_resources (service_id, name, capacity, capacitor_id) VALUES (2, 'capacity', 42, 'unittest2');
`)
@@ -133,17 +127,17 @@
s.Cluster.CapacityPlugins["unittest"].(*plugins.StaticCapacityPlugin).Capacity = 23
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

+scrapedAt1 := s.Clock.Now().Add(-5 * time.Second)
+scrapedAt2 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 5, next_scrape_at = 905 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 7, next_scrape_at = 907 WHERE capacitor_id = 'unittest2';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
UPDATE cluster_resources SET capacity = 23 WHERE service_id = 1 AND name = 'things';
-`)

-//move the clock forward by 300 seconds (the capacitor add step only triggers every five minutes)
-//TODO: I hate this clock
-for step := 1; step <= 300; step++ {
-_ = test.TimeNow()
-}
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+)

//add a capacity plugin that reports subcapacities; check that subcapacities
//are correctly written when creating a cluster_resources record
@@ -157,30 +151,41 @@
`
subcapacityPlugin := s.AddCapacityPlugin(t, pluginConfig).(*plugins.StaticCapacityPlugin) //nolint:errcheck
setClusterCapacitorsStale(t, s)
+s.Clock.StepBy(5 * time.Minute) //to force a capacitor consistency check to run
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

+scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
+scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
+scrapedAt4 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 309, next_scrape_at = 1209 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 311, next_scrape_at = 1211 WHERE capacitor_id = 'unittest2';
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', 313, 1, '{"smaller_half":14,"larger_half":28}', 1213);
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, serialized_metrics, next_scrape_at) VALUES ('unittest4', %d, 5, '{"smaller_half":14,"larger_half":28}', %d);
INSERT INTO cluster_resources (service_id, name, capacity, subcapacities, capacitor_id) VALUES (2, 'things', 42, '[{"smaller_half":14},{"larger_half":28}]', 'unittest4');
-`)
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
+)

//check that scraping correctly updates subcapacities on an existing record
subcapacityPlugin.Capacity = 10
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

+scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
+scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
+scrapedAt4 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 315, next_scrape_at = 1215 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 317, next_scrape_at = 1217 WHERE capacitor_id = 'unittest2';
-UPDATE cluster_capacitors SET scraped_at = 319, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = 1219 WHERE capacitor_id = 'unittest4';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
+UPDATE cluster_capacitors SET scraped_at = %d, serialized_metrics = '{"smaller_half":3,"larger_half":7}', next_scrape_at = %d WHERE capacitor_id = 'unittest4';
UPDATE cluster_resources SET capacity = 10, subcapacities = '[{"smaller_half":3},{"larger_half":7}]' WHERE service_id = 2 AND name = 'things';
-`)

-//move the clock forward by 300 seconds (the capacitor add step only triggers every five minutes)
-//TODO: I hate this clock
-for step := 1; step <= 300; step++ {
-_ = test.TimeNow()
-}
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
+)

//add a capacity plugin that also reports capacity per availability zone; check that
//these capacities are correctly written when creating a cluster_resources record
@@ -194,26 +199,47 @@
`
azCapacityPlugin := s.AddCapacityPlugin(t, pluginConfig).(*plugins.StaticCapacityPlugin) //nolint:errcheck
setClusterCapacitorsStale(t, s)
+s.Clock.StepBy(5 * time.Minute) //to force a capacitor consistency check to run
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

+scrapedAt1 = s.Clock.Now().Add(-15 * time.Second)
+scrapedAt2 = s.Clock.Now().Add(-10 * time.Second)
+scrapedAt4 = s.Clock.Now().Add(-5 * time.Second)
+scrapedAt5 := s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 621, next_scrape_at = 1521 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 623, next_scrape_at = 1523 WHERE capacitor_id = 'unittest2';
-UPDATE cluster_capacitors SET scraped_at = 625, next_scrape_at = 1525 WHERE capacitor_id = 'unittest4';
-INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', 627, 1, 1527);
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
+INSERT INTO cluster_capacitors (capacitor_id, scraped_at, scrape_duration_secs, next_scrape_at) VALUES ('unittest5', %d, 5, %d);
INSERT INTO cluster_resources (service_id, name, capacity, capacity_per_az, capacitor_id) VALUES (3, 'things', 42, '[{"name":"az-one","capacity":21,"usage":4},{"name":"az-two","capacity":21,"usage":4}]', 'unittest5');
-`)
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
+scrapedAt5.Unix(), scrapedAt5.Add(15*time.Minute).Unix(),
+)

//check that scraping correctly updates the capacities on an existing record
azCapacityPlugin.Capacity = 30
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)))

+scrapedAt1 = s.Clock.Now().Add(-15 * time.Second)
+scrapedAt2 = s.Clock.Now().Add(-10 * time.Second)
+scrapedAt4 = s.Clock.Now().Add(-5 * time.Second)
+scrapedAt5 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 629, next_scrape_at = 1529 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 631, next_scrape_at = 1531 WHERE capacitor_id = 'unittest2';
-UPDATE cluster_capacitors SET scraped_at = 633, next_scrape_at = 1533 WHERE capacitor_id = 'unittest4';
-UPDATE cluster_capacitors SET scraped_at = 635, next_scrape_at = 1535 WHERE capacitor_id = 'unittest5';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest5';
UPDATE cluster_resources SET capacity = 30, capacity_per_az = '[{"name":"az-one","capacity":15,"usage":3},{"name":"az-two","capacity":15,"usage":3}]' WHERE service_id = 3 AND name = 'things';
-`)
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
+scrapedAt5.Unix(), scrapedAt5.Add(15*time.Minute).Unix(),
+)

//check data metrics generated for these capacity data
registry := prometheus.NewPedanticRegistry()
@@ -232,19 +258,26 @@
delete(s.Cluster.CapacityPlugins, "unittest5")
setClusterCapacitorsStale(t, s)
mustT(t, jobloop.ProcessMany(job, s.Ctx, len(s.Cluster.CapacityPlugins)+1)) //+1 to account for the deleted capacitor

+scrapedAt1 = s.Clock.Now().Add(-10 * time.Second)
+scrapedAt2 = s.Clock.Now().Add(-5 * time.Second)
+scrapedAt4 = s.Clock.Now()
tr.DBChanges().AssertEqualf(`
-UPDATE cluster_capacitors SET scraped_at = 637, next_scrape_at = 1537 WHERE capacitor_id = 'unittest';
-UPDATE cluster_capacitors SET scraped_at = 639, next_scrape_at = 1539 WHERE capacitor_id = 'unittest2';
-UPDATE cluster_capacitors SET scraped_at = 641, next_scrape_at = 1541 WHERE capacitor_id = 'unittest4';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest2';
+UPDATE cluster_capacitors SET scraped_at = %d, next_scrape_at = %d WHERE capacitor_id = 'unittest4';
DELETE FROM cluster_capacitors WHERE capacitor_id = 'unittest5';
DELETE FROM cluster_resources WHERE service_id = 3 AND name = 'things';
-`)
+`,
+scrapedAt1.Unix(), scrapedAt1.Add(15*time.Minute).Unix(),
+scrapedAt2.Unix(), scrapedAt2.Add(15*time.Minute).Unix(),
+scrapedAt4.Unix(), scrapedAt4.Add(15*time.Minute).Unix(),
+)
}

func setClusterCapacitorsStale(t *testing.T, s test.Setup) {
-//NOTE: This is built to not use `test.TimeNow()`, because using this function shifts the time around.
-//TODO: I hate this clock
t.Helper()
-_, err := s.DB.Exec(`UPDATE cluster_capacitors SET next_scrape_at = (SELECT MAX(scraped_at) FROM cluster_capacitors)`)
+_, err := s.DB.Exec(`UPDATE cluster_capacitors SET next_scrape_at = $1`, s.Clock.Now())
mustT(t, err)
}
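With the mock clock, every expected timestamp in the assertions above is derived from `s.Clock.Now()` instead of being hard-coded. Judging by the deltas in this diff, each scrape advances the clock by 5 seconds and the scrape interval is 15 minutes, so the expected values can be computed generically. A sketch of that arithmetic (the helper below is hypothetical, not part of the PR):

```go
// expectedScrapeTimes derives expected (scraped_at, next_scrape_at) pairs for
// n capacitors scraped back-to-back, assuming each scrape advances the mock
// clock by `step` and that the scrape interval is 15 minutes. Both constants
// are inferred from the deltas in this diff.
func expectedScrapeTimes(now time.Time, n int, step time.Duration) (scrapedAt, nextScrapeAt []time.Time) {
	for i := 0; i < n; i++ {
		// the i-th capacitor finished (n-1-i) steps before the last one
		t := now.Add(-time.Duration(n-1-i) * step)
		scrapedAt = append(scrapedAt, t)
		nextScrapeAt = append(nextScrapeAt, t.Add(15*time.Minute))
	}
	return scrapedAt, nextScrapeAt
}
```

For two capacitors, `expectedScrapeTimes(s.Clock.Now(), 2, 5*time.Second)` reproduces the `scrapedAt1`/`scrapedAt2` values used above.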
16 changes: 10 additions & 6 deletions internal/collector/collector.go
@@ -40,19 +40,23 @@ type Collector struct {
//Usually logg.Error, but can be changed inside unit tests.
LogError func(msg string, args ...any)
//Usually time.Now, but can be changed inside unit tests.
-TimeNow func() time.Time
+//MeasureTimeAtEnd behaves slightly differently in unit tests: It will advance
+//the mock.Clock before reading it to simulate time passing during the previous task.
+MeasureTime func() time.Time
+MeasureTimeAtEnd func() time.Time
//Usually addJitter, but can be changed inside unit tests.
AddJitter func(time.Duration) time.Duration
}

// NewCollector creates a Collector instance.
func NewCollector(cluster *core.Cluster, dbm *gorp.DbMap) *Collector {
return &Collector{
-Cluster: cluster,
-DB: dbm,
-LogError: logg.Error,
-TimeNow: time.Now,
-AddJitter: addJitter,
+Cluster: cluster,
+DB: dbm,
+LogError: logg.Error,
+MeasureTime: time.Now,
+MeasureTimeAtEnd: time.Now,
+AddJitter: addJitter,
}
}

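The tests construct their `Collector` through a shared `getCollector` helper whose definition is not visible in this diff view. A plausible reconstruction, assuming the go-bits/mock clock API (`Clock.Now`, `Clock.StepBy`) and a 5-second step per `MeasureTimeAtEnd` call to match the test deltas:

```go
// Hypothetical sketch of the test helper referenced above; the actual
// definition lives elsewhere in this PR. s.Clock is assumed to be a
// *mock.Clock from github.com/sapcc/go-bits/mock.
func getCollector(t *testing.T, s test.Setup) Collector {
	return Collector{
		Cluster:     s.Cluster,
		DB:          s.DB,
		LogError:    t.Errorf,
		MeasureTime: s.Clock.Now,
		MeasureTimeAtEnd: func() time.Time {
			s.Clock.StepBy(5 * time.Second) // simulate time passing during the previous task
			return s.Clock.Now()
		},
		AddJitter: test.NoJitter,
	}
}
```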
2 changes: 1 addition & 1 deletion internal/collector/consistency.go
@@ -126,7 +126,7 @@ func (c *Collector) checkConsistencyDomain(domain db.Domain) error {
}
logg.Info("checking consistency for %d projects in domain %q...", len(projects), domain.Name)

-now := c.TimeNow()
+now := c.MeasureTime()
for _, project := range projects {
//ValidateProjectServices usually does nothing or does maybe one DELETE or
//INSERT, so it does not need to be in a transaction
13 changes: 3 additions & 10 deletions internal/collector/consistency_test.go
@@ -27,20 +27,11 @@ import (

"github.com/sapcc/limes/internal/core"
"github.com/sapcc/limes/internal/db"
"github.com/sapcc/limes/internal/test"
)

func Test_Consistency(t *testing.T) {
-test.ResetTime()
s, cluster := keystoneTestCluster(t)
-c := Collector{
-Cluster: cluster,
-DB: s.DB,
-LogError: t.Errorf,
-TimeNow: test.TimeNow,
-AddJitter: test.NoJitter,
-}

+c := getCollector(t, s)
consistencyJob := c.CheckConsistencyJob(s.Registry)

//run ScanDomains once to establish a baseline
@@ -53,6 +44,7 @@
//check that CheckConsistency() is satisfied with the
//{domain,project}_services created by ScanDomains(), but adds
//cluster_services entries
+s.Clock.StepBy(time.Hour)
err = consistencyJob.ProcessOne(s.Ctx)
if err != nil {
t.Error(err)
@@ -140,6 +132,7 @@
//are added; for all project services that are created here, project
//resources are added where the quota constraint contains a Minimum value or
//the quota distribution configuration contains a DefaultQuota value..
+s.Clock.StepBy(time.Hour)
err = consistencyJob.ProcessOne(s.Ctx)
if err != nil {
t.Error(err)
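The `s.Clock.StepBy(time.Hour)` calls replace the old idiom of calling `test.TimeNow()` in a loop to tick the clock forward one second per read: the mock clock only moves when told to. A minimal sketch of the semantics this relies on, assuming the `mock.Clock` API from go-bits as used in this PR:

```go
package main

import (
	"fmt"
	"time"

	"github.com/sapcc/go-bits/mock"
)

func main() {
	clock := mock.NewClock()
	t0 := clock.Now()       // deterministic start time; does not advance on its own
	clock.StepBy(time.Hour) // move time forward explicitly
	fmt.Println(clock.Now().Sub(t0)) // prints 1h0m0s
}
```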
12 changes: 6 additions & 6 deletions internal/collector/fixtures/checkconsistency-pre.sql
@@ -30,12 +30,12 @@ INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (2, 1, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 2, 2);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);
12 changes: 6 additions & 6 deletions internal/collector/fixtures/checkconsistency0.sql
@@ -34,12 +34,12 @@ INSERT INTO domains (id, name, uuid) VALUES (2, 'france', 'uuid-for-france');
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (1, 1, 'centralized', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (2, 1, 'shared', 0, 0);
INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (3, 1, 'unshared', 0, 0);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 1, 1);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 2, 2);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 2, 2);
-INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 2, 2);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (4, 2, 'centralized', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (5, 2, 'shared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (6, 2, 'unshared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (7, 3, 'centralized', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (8, 3, 'shared', 0, 0);
+INSERT INTO project_services (id, project_id, type, next_scrape_at, rates_next_scrape_at) VALUES (9, 3, 'unshared', 0, 0);

INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (1, 1, 'berlin', 'uuid-for-berlin', 'uuid-for-germany', FALSE);
INSERT INTO projects (id, domain_id, name, uuid, parent_uuid, has_bursting) VALUES (2, 1, 'dresden', 'uuid-for-dresden', 'uuid-for-berlin', FALSE);