diff --git a/docs/operators/config.md b/docs/operators/config.md index 8eea12f0..53c2e3e0 100644 --- a/docs/operators/config.md +++ b/docs/operators/config.md @@ -132,7 +132,6 @@ Some special behaviors for resources can be configured in the `resource_behavior | `resource_behavior[].resource` | yes | Must contain a regex. The behavior entry applies to all resources where this regex matches against a slash-concatenated pair of service type and resource name. The anchors `^` and `$` are implied at both ends, so the regex must match the entire phrase. | | `resource_behavior[].overcommit_factor` | no | If given, capacity for matching resources will be computed as `raw_capacity * overcommit_factor`, where `raw_capacity` is what the capacity plugin reports. | | `resource_behavior[].commitment_durations` | no | If given, commitments for this resource can be created with any of the given durations. The duration format is the same as in the `commitments[].duration` attribute that appears on the resource API. If empty, this resource does not accept commitments. | -| `resource_behavior[].commitment_is_az_aware` | no | If true, commitments for this resource must be created in a specific AZ (i.e. not in a pseudo-AZ). If false, commitments for this resource must be created in the pseudo-AZ `any`. Ignored if `commitment_durations` is empty. | | `resource_behavior[].commitment_min_confirm_date` | no | If given, commitments for this resource will always be created with `confirm_by` no earlier than this timestamp. This can be used to plan the introduction of commitments on a specific date. Ignored if `commitment_durations` is empty. | | `resource_behavior[].commitment_until_percent` | no | If given, commitments for this resource will only be confirmed while the total of all confirmed commitments or uncommitted usage in the respective AZ is smaller than the respective percentage of the total capacity for that AZ. This is intended to provide a reserved buffer for the growth quota configured by `quota_distribution_configs[].autogrow.growth_multiplier`. Defaults to 100, i.e. all capacity is committable. | | `resource_behavior[].commitment_conversion.identifier` | no | If given, must contain a string. Commitments for this resource will then be allowed to be converted into commitments for all resources that set the same conversion identifier. 
| @@ -147,7 +146,7 @@ resource_behavior: # matches both sharev2/share_capacity and sharev2/snapshot_capacity - { resource: sharev2/.*_capacity, overcommit_factor: 2 } # starting in 2024, offer commitments for Cinder storage - - { resource: volumev2/capacity, commitment_durations: [ 1 year, 2 years, 3 years ], commitment_is_az_aware: true, commitment_min_confirm_date: 2024-01-01T00:00:00Z } + - { resource: volumev2/capacity, commitment_durations: [ 1 year, 2 years, 3 years ], commitment_min_confirm_date: 2024-01-01T00:00:00Z } # an Ironic flavor has been renamed from "thebigbox" to "baremetal_large" - { resource: compute/instances_baremetal_large, identity_in_v1_api: compute/instances_thebigbox } ``` diff --git a/internal/api/api_test.go b/internal/api/api_test.go index 4d2d75e7..21a021c6 100644 --- a/internal/api/api_test.go +++ b/internal/api/api_test.go @@ -106,10 +106,6 @@ const ( - resource: 'shared/(capacity|things)$' commitment_durations: ["1 hour", "2 hours"] commitment_min_confirm_date: '1970-01-08T00:00:00Z' # one week after start of mock.Clock - - resource: 'shared/capacity$' - commitment_is_az_aware: true - - resource: shared/things - commitment_is_az_aware: false ` ) diff --git a/internal/api/commitment.go b/internal/api/commitment.go index 737bad81..d516e7b9 100644 --- a/internal/api/commitment.go +++ b/internal/api/commitment.go @@ -32,6 +32,7 @@ import ( "github.com/sapcc/go-api-declarations/cadf" "github.com/sapcc/go-api-declarations/limes" limesresources "github.com/sapcc/go-api-declarations/limes/resources" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/audittools" "github.com/sapcc/go-bits/gopherpolicy" "github.com/sapcc/go-bits/httpapi" @@ -234,18 +235,19 @@ func (p *v1Provider) parseAndValidateCommitmentRequest(w http.ResponseWriter, r return nil, nil, nil } behavior := p.Cluster.BehaviorForResource(dbServiceType, dbResourceName) + resInfo := p.Cluster.InfoForResource(dbServiceType, dbResourceName) if len(behavior.CommitmentDurations) == 0 { http.Error(w, "commitments are not enabled for this resource", http.StatusUnprocessableEntity) return nil, nil, nil } - if behavior.CommitmentIsAZAware { - if !slices.Contains(p.Cluster.Config.AvailabilityZones, req.AvailabilityZone) { - http.Error(w, "no such availability zone", http.StatusUnprocessableEntity) + if resInfo.Topology == liquid.FlatResourceTopology { + if req.AvailabilityZone != limes.AvailabilityZoneAny { + http.Error(w, `resource does not accept AZ-aware commitments, so the AZ must be set to "any"`, http.StatusUnprocessableEntity) return nil, nil, nil } } else { - if req.AvailabilityZone != limes.AvailabilityZoneAny { - http.Error(w, `resource does not accept AZ-aware commitments, so the AZ must be set to "any"`, http.StatusUnprocessableEntity) + if !slices.Contains(p.Cluster.Config.AvailabilityZones, req.AvailabilityZone) { + http.Error(w, "no such availability zone", http.StatusUnprocessableEntity) return nil, nil, nil } } diff --git a/internal/api/commitment_test.go b/internal/api/commitment_test.go index 3190da00..0ffa194d 100644 --- a/internal/api/commitment_test.go +++ b/internal/api/commitment_test.go @@ -25,10 +25,12 @@ import ( "testing" "time" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/assert" "github.com/sapcc/limes/internal/db" "github.com/sapcc/limes/internal/test" + "github.com/sapcc/limes/internal/test/plugins" ) const day = 24 * time.Hour @@ -47,10 +49,6 @@ const testCommitmentsYAML = ` - resource: first/.* commitment_durations: ["1 hour", "2 
hours"] commitment_min_confirm_date: '1970-01-08T00:00:00Z' # one week after start of mock.Clock - - resource: first/things - commitment_is_az_aware: false - - resource: first/capacity - commitment_is_az_aware: true ` const testCommitmentsYAMLWithoutMinConfirmDate = ` availability_zones: [ az-one, az-two ] @@ -65,12 +63,6 @@ const testCommitmentsYAMLWithoutMinConfirmDate = ` # the resources in "first" have commitments, the ones in "second" do not - resource: second/.* commitment_durations: ["1 hour", "2 hours", "3 hours"] - - resource: second/things - commitment_is_az_aware: false - - resource: second/capacity - commitment_is_az_aware: true - - resource: second/capacity_portion - commitment_is_az_aware: true ` const testConvertCommitmentsYAML = ` @@ -95,10 +87,8 @@ const testConvertCommitmentsYAML = ` - resource: third/.* commitment_durations: ["1 hour", "2 hours"] - resource: first/capacity - commitment_is_az_aware: true commitment_conversion: {identifier: flavor1, weight: 48} - resource: second/capacity - commitment_is_az_aware: true commitment_conversion: {identifier: flavor1, weight: 32} - resource: third/capacity_c32 commitment_conversion: {identifier: flavor1, weight: 32} @@ -118,6 +108,10 @@ func TestCommitmentLifecycleWithDelayedConfirmation(t *testing.T) { test.WithConfig(testCommitmentsYAML), test.WithAPIHandler(NewV1API), ) + plugin := s.Cluster.QuotaPlugins["first"].(*plugins.GenericQuotaPlugin) + plugin2 := s.Cluster.QuotaPlugins["second"].(*plugins.GenericQuotaPlugin) + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZAwareResourceTopology}, "things": {Topology: liquid.FlatResourceTopology}} + plugin2.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZAwareResourceTopology}, "things": {Topology: liquid.FlatResourceTopology}} // GET returns an empty list if there are no commitments assert.HTTPRequest{ @@ -477,6 +471,9 @@ func TestPutCommitmentErrorCases(t *testing.T) { test.WithAPIHandler(NewV1API), ) + plugin := s.Cluster.QuotaPlugins["first"].(*plugins.GenericQuotaPlugin) + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"things": {Topology: liquid.FlatResourceTopology}} + request := assert.JSONObject{ "service_type": "first", "resource_name": "capacity", diff --git a/internal/collector/capacity_scrape.go b/internal/collector/capacity_scrape.go index be47f325..ff9f0347 100644 --- a/internal/collector/capacity_scrape.go +++ b/internal/collector/capacity_scrape.go @@ -361,6 +361,7 @@ func (c *Collector) processCapacityScrapeTask(ctx context.Context, task capacity func (c *Collector) confirmPendingCommitmentsIfNecessary(serviceType db.ServiceType, resourceName liquid.ResourceName) error { behavior := c.Cluster.BehaviorForResource(serviceType, resourceName) + resInfo := c.Cluster.InfoForResource(serviceType, resourceName) now := c.MeasureTime() // do not run ConfirmPendingCommitments if commitments are not enabled (or not live yet) for this resource @@ -378,7 +379,7 @@ func (c *Collector) confirmPendingCommitmentsIfNecessary(serviceType db.ServiceT defer sqlext.RollbackUnlessCommitted(tx) committableAZs := c.Cluster.Config.AvailabilityZones - if !behavior.CommitmentIsAZAware { + if resInfo.Topology == liquid.FlatResourceTopology { committableAZs = []liquid.AvailabilityZone{liquid.AvailabilityZoneAny} } for _, az := range committableAZs { diff --git a/internal/collector/capacity_scrape_test.go 
b/internal/collector/capacity_scrape_test.go index bb4f3428..e1eb0cae 100644 --- a/internal/collector/capacity_scrape_test.go +++ b/internal/collector/capacity_scrape_test.go @@ -126,7 +126,7 @@ const ( - second/things resource_behavior: # enable commitments for the */capacity resources - - { resource: '.*/capacity', commitment_durations: [ '1 hour', '10 days' ], commitment_is_az_aware: true } + - { resource: '.*/capacity', commitment_durations: [ '1 hour', '10 days' ] } # test that overcommit factor is considered when confirming commitments - { resource: first/capacity, overcommit_factor: 10.0 } quota_distribution_configs: diff --git a/internal/collector/commitment_cleanup_test.go b/internal/collector/commitment_cleanup_test.go index cdd10240..20f24729 100644 --- a/internal/collector/commitment_cleanup_test.go +++ b/internal/collector/commitment_cleanup_test.go @@ -41,7 +41,7 @@ const ( type: --test-generic resource_behavior: # enable commitments for the */capacity resources - - { resource: '.*/capacity', commitment_durations: [ '1 day', '3 years' ], commitment_is_az_aware: true } + - { resource: '.*/capacity', commitment_durations: [ '1 day', '3 years' ] } ` ) diff --git a/internal/collector/scrape.go b/internal/collector/scrape.go index f37f62d4..53495ef6 100644 --- a/internal/collector/scrape.go +++ b/internal/collector/scrape.go @@ -208,17 +208,29 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P srv := task.Service for resName, resData := range resourceData { + resInfo := c.Cluster.InfoForResource(task.Service.Type, resName) if len(resData.UsageData) == 0 { // ensure that there is at least one ProjectAZResource for each ProjectResource resData.UsageData = core.InAnyAZ(core.UsageData{Usage: 0}) resourceData[resName] = resData } else { - // for AZ-aware resources, ensure that we also have a ProjectAZResource in - // "any", because ApplyComputedProjectQuota needs somewhere to write base - // quotas into if enabled - _, exists := resData.UsageData[liquid.AvailabilityZoneAny] - if !exists { - resData.UsageData[liquid.AvailabilityZoneAny] = &core.UsageData{Usage: 0} + // AZ separated resources will not include "any" AZ. The basequota will be distributed towards the existing AZs. + // If an AZ is not available within the scrape response, it will be created to store the basequota. + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + for _, availabilityZone := range c.Cluster.Config.AvailabilityZones { + _, exists := resData.UsageData[availabilityZone] + if !exists { + resData.UsageData[availabilityZone] = &core.UsageData{Usage: 0} + } + } + } else { + // for AZ-aware resources, ensure that we also have a ProjectAZResource in + // "any", because ApplyComputedProjectQuota needs somewhere to write base + // quotas into if enabled + _, exists := resData.UsageData[liquid.AvailabilityZoneAny] + if !exists { + resData.UsageData[liquid.AvailabilityZoneAny] = &core.UsageData{Usage: 0} + } } } } @@ -303,6 +315,14 @@ func (c *Collector) writeResourceScrapeResult(dbDomain db.Domain, dbProject db.P azRes.Usage = data.Usage azRes.PhysicalUsage = data.PhysicalUsage + // set AZ backend quota. 
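The topology handling that the scrape.go hunk above adds to `writeResourceScrapeResult` is easier to read outside the diff. The following stand-alone sketch mirrors that logic with simplified types (a plain `map[string]uint64` instead of `core.PerAZ[core.UsageData]`, and string literals in place of the liquid topology constants); it is an illustration, not code from this change:

```go
package main

import "fmt"

// azUsage stands in for core.PerAZ[core.UsageData]; only the usage counter matters here.
type azUsage map[string]uint64

// normalizeAZs mirrors the normalization added to writeResourceScrapeResult:
// AZ-separated resources get an entry for every configured AZ (so base quota has a
// place to live), every other topology keeps the pseudo-AZ "any" as the fallback slot.
func normalizeAZs(usage azUsage, topology string, allAZs []string) azUsage {
	switch {
	case len(usage) == 0:
		usage["any"] = 0
	case topology == "az-separated":
		for _, az := range allAZs {
			if _, exists := usage[az]; !exists {
				usage[az] = 0
			}
		}
	default:
		if _, exists := usage["any"]; !exists {
			usage["any"] = 0
		}
	}
	return usage
}

func main() {
	fmt.Println(normalizeAZs(azUsage{"az-one": 2}, "az-separated", []string{"az-one", "az-two"}))
	// map[az-one:2 az-two:0]
}
```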
+ resInfo := c.Cluster.InfoForResource(srv.Type, res.Name) + if resInfo.Topology == liquid.AZSeparatedResourceTopology && resInfo.HasQuota { + azRes.BackendQuota = data.Quota + } else { + azRes.BackendQuota = nil + } + // warn when the backend is inconsistent with itself if data.Subresources != nil && uint64(len(data.Subresources)) != data.Usage { logg.Info("resource quantity mismatch in project %s, resource %s/%s, AZ %s: usage = %d, but found %d subresources", diff --git a/internal/collector/scrape_test.go b/internal/collector/scrape_test.go index d36ee371..05ff6b23 100644 --- a/internal/collector/scrape_test.go +++ b/internal/collector/scrape_test.go @@ -21,6 +21,7 @@ package collector import ( "database/sql" + "errors" "net/http" "regexp" "testing" @@ -582,3 +583,175 @@ func Test_ScrapeReturnsNoUsageData(t *testing.T) { scrapedAt.Unix(), scrapedAt.Add(scrapeInterval).Unix(), ) } + +func Test_TopologyScrapes(t *testing.T) { + s := test.NewSetup(t, + test.WithConfig(testScrapeBasicConfigYAML), + ) + prepareDomainsAndProjectsForScrape(t, s) + + c := getCollector(t, s) + job := c.ResourceScrapeJob(s.Registry) + withLabel := jobloop.WithLabel("service_type", "unittest") + syncJob := c.SyncQuotaToBackendJob(s.Registry) + plugin := s.Cluster.QuotaPlugins["unittest"].(*plugins.GenericQuotaPlugin) + + tr, tr0 := easypg.NewTracker(t, s.DB.Db) + tr0.AssertEqualToFile("fixtures/scrape0.sql") + + // positive: Sync az-separated quota values with the backend + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "az-two": {}} + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + scrapedAt1 := s.Clock.Now().Add(-5 * time.Second) + scrapedAt2 := s.Clock.Now() + tr.DBChanges().AssertEqualf(` + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (1, 1, 'az-one', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (10, 5, 'any', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (11, 5, 'az-one', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (12, 5, 'az-two', 0, '{"t":[%[3]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (13, 6, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (14, 6, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[3]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (2, 1, 'az-two', 0, 0, '{"t":[%[1]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (3, 2, 'any', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (4, 2, 'az-one', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (5, 2, 'az-two', 0, '{"t":[%[1]d],"v":[0]}'); + INSERT INTO project_az_resources (id, 
resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (6, 3, 'az-one', 2, '[{"index":0},{"index":1}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, subresources, historical_usage, backend_quota) VALUES (7, 3, 'az-two', 2, '[{"index":2},{"index":3}]', '{"t":[%[1]d],"v":[2]}', 21); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (8, 4, 'az-one', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); + INSERT INTO project_az_resources (id, resource_id, az, usage, physical_usage, historical_usage, backend_quota) VALUES (9, 4, 'az-two', 0, 0, '{"t":[%[3]d],"v":[0]}', 50); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (1, 1, 'capacity', 0, 100); + INSERT INTO project_resources (id, service_id, name) VALUES (2, 1, 'capacity_portion'); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (3, 1, 'things', 0, 42); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (4, 2, 'capacity', 0, 100); + INSERT INTO project_resources (id, service_id, name) VALUES (5, 2, 'capacity_portion'); + INSERT INTO project_resources (id, service_id, name, quota, backend_quota) VALUES (6, 2, 'things', 0, 42); + UPDATE project_services SET scraped_at = %[1]d, scrape_duration_secs = 5, serialized_metrics = '{"capacity_usage":0,"things_usage":4}', checked_at = %[1]d, next_scrape_at = %[2]d, quota_desynced_at = %[1]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[3]d, scrape_duration_secs = 5, serialized_metrics = '{"capacity_usage":0,"things_usage":4}', checked_at = %[3]d, next_scrape_at = %[4]d, quota_desynced_at = %[3]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + scrapedAt1.Unix(), scrapedAt1.Add(scrapeInterval).Unix(), + scrapedAt2.Unix(), scrapedAt2.Add(scrapeInterval).Unix(), + ) + + // set some quota acpq values. 
+ // resource level + _, err := s.DB.Exec(`UPDATE project_resources SET quota = $1 WHERE name = $2`, 20, "capacity") + if err != nil { + t.Fatal(err) + } + _, err = s.DB.Exec(`UPDATE project_resources SET quota = $1 WHERE name = $2`, 13, "things") + if err != nil { + t.Fatal(err) + } + // az level + _, err = s.DB.Exec(`UPDATE project_az_resources SET quota = $1 WHERE resource_id IN (1,4) and az != 'any'`, 20) + if err != nil { + t.Fatal(err) + } + _, err = s.DB.Exec(`UPDATE project_az_resources SET quota = $1 WHERE resource_id IN (3,6) and az != 'any'`, 13) + if err != nil { + t.Fatal(err) + } + tr.DBChanges().Ignore() + + mustT(t, syncJob.ProcessOne(s.Ctx, withLabel)) + mustT(t, syncJob.ProcessOne(s.Ctx, withLabel)) + + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 1 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 2 AND resource_id = 1 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 13 WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 8 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 20 WHERE id = 9 AND resource_id = 4 AND az = 'az-two'; + UPDATE project_resources SET backend_quota = 20 WHERE id = 1 AND service_id = 1 AND name = 'capacity'; + UPDATE project_resources SET backend_quota = 13 WHERE id = 3 AND service_id = 1 AND name = 'things'; + UPDATE project_resources SET backend_quota = 20 WHERE id = 4 AND service_id = 2 AND name = 'capacity'; + UPDATE project_resources SET backend_quota = 13 WHERE id = 6 AND service_id = 2 AND name = 'things'; + UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = 5 WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = 5 WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `) + + s.Clock.StepBy(scrapeInterval) + + // topology of a resource changes. 
Reset AZ-separated backend_quota + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZAwareResourceTopology}} + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + checkedAt1 := s.Clock.Now().Add(-5 * time.Second) + checkedAt2 := s.Clock.Now() + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 1 AND resource_id = 1 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (15, 3, 'any', 0, '{"t":[1825],"v":[0]}'); + INSERT INTO project_az_resources (id, resource_id, az, usage, historical_usage) VALUES (16, 6, 'any', 0, '{"t":[1830],"v":[0]}'); + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 2 AND resource_id = 1 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = NULL WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 8 AND resource_id = 4 AND az = 'az-one'; + UPDATE project_az_resources SET backend_quota = 50 WHERE id = 9 AND resource_id = 4 AND az = 'az-two'; + UPDATE project_services SET scraped_at = %[1]d, checked_at = %[1]d, next_scrape_at = %[2]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[3]d, checked_at = %[3]d, next_scrape_at = %[4]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + checkedAt1.Unix(), checkedAt1.Add(scrapeInterval).Unix(), + checkedAt2.Unix(), checkedAt2.Add(scrapeInterval).Unix(), + ) + + s.Clock.StepBy(scrapeInterval) + // positive: missing AZ in resource report will be created by the scraper in order to assign basequota later. + // warning: any AZs will be removed, because resource things switches from AZAware to AZSeparated. 
+ plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + delete(plugin.StaticResourceData["things"].UsageData, "az-two") + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + mustT(t, job.ProcessOne(s.Ctx, withLabel)) + + checkedAt1 = s.Clock.Now().Add(-5 * time.Second) + checkedAt2 = s.Clock.Now() + tr.DBChanges().AssertEqualf(` + UPDATE project_az_resources SET backend_quota = 21 WHERE id = 13 AND resource_id = 6 AND az = 'az-one'; + UPDATE project_az_resources SET usage = 0, subresources = '', historical_usage = '{"t":[%[2]d,%[5]d],"v":[2,0]}' WHERE id = 14 AND resource_id = 6 AND az = 'az-two'; + DELETE FROM project_az_resources WHERE id = 15 AND resource_id = 3 AND az = 'any'; + DELETE FROM project_az_resources WHERE id = 16 AND resource_id = 6 AND az = 'any'; + UPDATE project_az_resources SET backend_quota = 21 WHERE id = 6 AND resource_id = 3 AND az = 'az-one'; + UPDATE project_az_resources SET usage = 0, subresources = '', historical_usage = '{"t":[%[1]d,%[3]d],"v":[2,0]}' WHERE id = 7 AND resource_id = 3 AND az = 'az-two'; + UPDATE project_services SET scraped_at = %[3]d, serialized_metrics = '{"capacity_usage":0,"things_usage":2}', checked_at = %[3]d, next_scrape_at = %[4]d WHERE id = 1 AND project_id = 1 AND type = 'unittest'; + UPDATE project_services SET scraped_at = %[5]d, serialized_metrics = '{"capacity_usage":0,"things_usage":2}', checked_at = %[5]d, next_scrape_at = %[6]d WHERE id = 2 AND project_id = 2 AND type = 'unittest'; + `, + scrapedAt1.Unix(), scrapedAt2.Unix(), + checkedAt1.Unix(), checkedAt1.Add(scrapeInterval).Unix(), + checkedAt2.Unix(), checkedAt2.Add(scrapeInterval).Unix(), + ) + + s.Clock.StepBy(scrapeInterval) + // negative: scrape with flat topology returns invalid AZs + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.FlatResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "az-two": {}} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: flat returned AZs: [az-one az-two]")) + + // negative: scrape with az-aware topology returns invalid any AZ + plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"any": {}} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: service: unittest, resource: capacity: scrape with topology type: az-aware returned AZs: [any]")) + + s.Clock.StepBy(scrapeInterval) + // negative: scrape with az-separated topology returns invalid AZs any and unknown + plugin.LiquidServiceInfo.Resources["capacity"] = liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"az-one": {}, "unknown": {}} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [az-one unknown]")) + + // negative: reject liquid initialization with invalid topologies + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: "invalidAZ1"}, "things": {Topology: "invalidAZ2"}} + 
mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/dresden: invalid topology: invalidAZ1 on resource: capacity\ninvalid topology: invalidAZ2 on resource: things")) + + s.Clock.StepBy(scrapeInterval) + // negative: multiple resources with mismatching topology to AZ responses + plugin.LiquidServiceInfo.Resources = map[liquid.ResourceName]liquid.ResourceInfo{"capacity": {Topology: liquid.AZSeparatedResourceTopology}, "things": {Topology: liquid.AZSeparatedResourceTopology}} + plugin.ReportedAZs = map[liquid.AvailabilityZone]struct{}{"unknown": {}} + mustFailT(t, job.ProcessOne(s.Ctx, withLabel), errors.New("during resource scrape of project germany/berlin: service: unittest, resource: capacity: scrape with topology type: az-separated returned AZs: [unknown]\nservice: unittest, resource: things: scrape with topology type: az-separated returned AZs: [unknown]")) +} diff --git a/internal/collector/sync_quota_to_backend.go b/internal/collector/sync_quota_to_backend.go index 65643c13..470382e5 100644 --- a/internal/collector/sync_quota_to_backend.go +++ b/internal/collector/sync_quota_to_backend.go @@ -25,6 +25,7 @@ import ( "fmt" "time" + "github.com/lib/pq" "github.com/prometheus/client_golang/prometheus" "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/go-bits/jobloop" @@ -93,15 +94,25 @@ func (c *Collector) processQuotaSyncTask(ctx context.Context, srv db.ProjectServ var ( quotaSyncSelectQuery = sqlext.SimplifyWhitespace(` - SELECT name, backend_quota, quota + SELECT id, name, backend_quota, quota FROM project_resources WHERE service_id = $1 AND quota IS NOT NULL `) + azQuotaSyncSelectQuery = sqlext.SimplifyWhitespace(` + SELECT az, backend_quota, quota + FROM project_az_resources + WHERE resource_id = $1 AND quota IS NOT NULL + `) quotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_resources SET backend_quota = quota WHERE service_id = $1 `) + azQuotaSyncMarkResourcesAsAppliedQuery = sqlext.SimplifyWhitespace(` + UPDATE project_az_resources + SET backend_quota = quota + WHERE resource_id = ANY($1) + `) quotaSyncMarkServiceAsAppliedQuery = sqlext.SimplifyWhitespace(` UPDATE project_services SET quota_desynced_at = NULL, quota_sync_duration_secs = $2 @@ -123,14 +134,18 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, // collect backend quota values that we want to apply targetQuotasInDB := make(map[liquid.ResourceName]uint64) + targetAZQuotasInDB := make(map[liquid.ResourceName]map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) needsApply := false + azSeparatedNeedsApply := false + var azSeparatedResourceIDs []db.ProjectResourceID err := sqlext.ForeachRow(c.DB, quotaSyncSelectQuery, []any{srv.ID}, func(rows *sql.Rows) error { var ( + resourceID db.ProjectResourceID resourceName liquid.ResourceName currentQuota *int64 targetQuota uint64 ) - err := rows.Scan(&resourceName, ¤tQuota, &targetQuota) + err := rows.Scan(&resourceID, &resourceName, ¤tQuota, &targetQuota) if err != nil { return err } @@ -138,21 +153,53 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if currentQuota == nil || *currentQuota < 0 || uint64(*currentQuota) != targetQuota { needsApply = true } + + resInfo := c.Cluster.InfoForResource(srv.Type, resourceName) + if resInfo.Topology != liquid.AZSeparatedResourceTopology { + return nil + } + err = sqlext.ForeachRow(c.DB, azQuotaSyncSelectQuery, []any{resourceID}, func(rows *sql.Rows) error { + var ( + 
availabilityZone liquid.AvailabilityZone + currentAZQuota *int64 + targetAZQuota uint64 + ) + err := rows.Scan(&availabilityZone, ¤tAZQuota, &targetAZQuota) + if err != nil { + return err + } + // defense in depth: configured backend_quota for AZ any or unknown are not valid for the azSeparatedQuota topology. + if (availabilityZone == liquid.AvailabilityZoneAny || availabilityZone == liquid.AvailabilityZoneUnknown) && currentAZQuota != nil { + return fmt.Errorf("detected invalid AZ: %s for resource: %s with topology: %s has backend_quota: %v", availabilityZone, resourceName, resInfo.Topology, currentAZQuota) + } + azSeparatedResourceIDs = append(azSeparatedResourceIDs, resourceID) + if targetAZQuotasInDB[resourceName] == nil { + targetAZQuotasInDB[resourceName] = make(map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest) + } + targetAZQuotasInDB[resourceName][availabilityZone] = liquid.AZResourceQuotaRequest{Quota: targetAZQuota} + if currentAZQuota == nil || *currentAZQuota < 0 || uint64(*currentAZQuota) != targetAZQuota { + azSeparatedNeedsApply = true + } + return nil + }) + if err != nil { + return err + } return nil }) if err != nil { return fmt.Errorf("while collecting target quota values for %s backend: %w", srv.Type, err) } - if needsApply { + if needsApply || azSeparatedNeedsApply { // double-check that we only include quota values for resources that the backend currently knows about - targetQuotasForBackend := make(map[liquid.ResourceName]uint64) + targetQuotasForBackend := make(map[liquid.ResourceName]liquid.ResourceQuotaRequest) for resName, resInfo := range plugin.Resources() { if !resInfo.HasQuota { continue } //NOTE: If `targetQuotasInDB` does not have an entry for this resource, we will write 0 into the backend. - targetQuotasForBackend[resName] = targetQuotasInDB[resName] + targetQuotasForBackend[resName] = liquid.ResourceQuotaRequest{Quota: targetQuotasInDB[resName], PerAZ: targetAZQuotasInDB[resName]} } // apply quotas in backend @@ -172,6 +219,12 @@ func (c *Collector) performQuotaSync(ctx context.Context, srv db.ProjectService, if err != nil { return err } + if azSeparatedNeedsApply { + _, err = c.DB.Exec(azQuotaSyncMarkResourcesAsAppliedQuery, pq.Array(azSeparatedResourceIDs)) + if err != nil { + return err + } + } } finishedAt := c.MeasureTimeAtEnd() diff --git a/internal/core/data.go b/internal/core/data.go index 83a0fafb..7e3ed0ec 100644 --- a/internal/core/data.go +++ b/internal/core/data.go @@ -190,6 +190,7 @@ func (r ResourceData) AddLocalizedUsage(az limes.AvailabilityZone, usage uint64) // UsageData contains usage data for a single project resource. // It appears in type ResourceData. type UsageData struct { + Quota *int64 Usage uint64 PhysicalUsage *uint64 // only supported by some plugins Subresources []any // only if supported by plugin and enabled in config diff --git a/internal/core/plugin.go b/internal/core/plugin.go index 218b38bf..e731f046 100644 --- a/internal/core/plugin.go +++ b/internal/core/plugin.go @@ -141,7 +141,7 @@ type QuotaPlugin interface { // SetQuota updates the backend service's quotas for the given project in the // given domain to the values specified here. The map is guaranteed to contain // values for all resources defined by Resources(). 
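The interface change that follows replaces the plain `map[liquid.ResourceName]uint64` with `liquid.ResourceQuotaRequest` values, which is what lets `performQuotaSync` forward per-AZ quotas for AZ-separated resources. A rough sketch of what a caller now hands to `SetQuota` (the resource names and quota numbers are invented for illustration; `syncExample` is not part of the change):

```go
// sketch only: shows the new argument shape of core.QuotaPlugin.SetQuota
func syncExample(ctx context.Context, plugin core.QuotaPlugin, project core.KeystoneProject) error {
	quotaReq := map[liquid.ResourceName]liquid.ResourceQuotaRequest{
		// AZ-separated resource: the per-AZ quotas travel along in PerAZ
		"capacity": {
			Quota: 40,
			PerAZ: map[liquid.AvailabilityZone]liquid.AZResourceQuotaRequest{
				"az-one": {Quota: 20},
				"az-two": {Quota: 20},
			},
		},
		// flat or AZ-aware resource: PerAZ stays nil, only the total is applied
		"things": {Quota: 13},
	}
	return plugin.SetQuota(ctx, project, quotaReq)
}
```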
- SetQuota(ctx context.Context, project KeystoneProject, quotas map[liquid.ResourceName]uint64) error + SetQuota(ctx context.Context, project KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error // Rates returns metadata for all the rates that this plugin scrapes // from the backend service. diff --git a/internal/core/resource_behavior.go b/internal/core/resource_behavior.go index ecd73868..6b63f8b7 100644 --- a/internal/core/resource_behavior.go +++ b/internal/core/resource_behavior.go @@ -37,7 +37,6 @@ type ResourceBehavior struct { FullResourceNameRx regexpext.BoundedRegexp `yaml:"resource"` OvercommitFactor liquid.OvercommitFactor `yaml:"overcommit_factor"` CommitmentDurations []limesresources.CommitmentDuration `yaml:"commitment_durations"` - CommitmentIsAZAware bool `yaml:"commitment_is_az_aware"` CommitmentMinConfirmDate *time.Time `yaml:"commitment_min_confirm_date"` CommitmentUntilPercent *float64 `yaml:"commitment_until_percent"` CommitmentConversion CommitmentConversion `yaml:"commitment_conversion"` @@ -100,9 +99,6 @@ func (b *ResourceBehavior) Merge(other ResourceBehavior, fullResourceName string b.CommitmentMinConfirmDate = other.CommitmentMinConfirmDate } } - if other.CommitmentIsAZAware { - b.CommitmentIsAZAware = true - } if other.CommitmentUntilPercent != nil { if b.CommitmentUntilPercent == nil || *b.CommitmentUntilPercent > *other.CommitmentUntilPercent { b.CommitmentUntilPercent = other.CommitmentUntilPercent diff --git a/internal/datamodel/apply_computed_project_quota.go b/internal/datamodel/apply_computed_project_quota.go index da2c8ccf..4bddbcb6 100644 --- a/internal/datamodel/apply_computed_project_quota.go +++ b/internal/datamodel/apply_computed_project_quota.go @@ -47,7 +47,7 @@ var ( WHERE ps.type = $1 AND pr.name = $2 AND (pr.min_quota_from_backend IS NOT NULL OR pr.max_quota_from_backend IS NOT NULL OR pr.max_quota_from_outside_admin IS NOT NULL - OR pr.max_quota_from_local_admin IS NOT NULL + OR pr.max_quota_from_local_admin IS NOT NULL OR pr.override_quota_from_config IS NOT NULL) `) @@ -151,7 +151,8 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } // evaluate QD algorithm - target, allowsQuotaOvercommit := acpqComputeQuotas(stats, cfg, constraints) + // AZ separated basequota will be assigned to all available AZs + target, allowsQuotaOvercommit := acpqComputeQuotas(stats, cfg, constraints, resInfo) if logg.ShowDebug { // NOTE: The structs that contain pointers must be printed as JSON to actually show all values. logg.Debug("ACPQ for %s/%s: stats = %#v", serviceType, resourceName, stats) @@ -164,6 +165,7 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R } // write new AZ quotas to database + servicesWithUpdatedQuota := make(map[db.ProjectServiceID]struct{}) err = sqlext.WithPreparedStatement(tx, acpqUpdateAZQuotaQuery, func(stmt *sql.Stmt) error { for az, azTarget := range target { for resourceID, projectTarget := range azTarget { @@ -171,6 +173,15 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R if err != nil { return fmt.Errorf("in AZ %s in project resource %d: %w", az, resourceID, err) } + // AZSeparatedResourceTopology does not update resource quota. Therefore the service desync needs to be queued right here. 
+ if resInfo.Topology == liquid.AZSeparatedResourceTopology { + var serviceID db.ProjectServiceID + err := tx.SelectOne(&serviceID, `SELECT service_id FROM project_resources WHERE id = $1`, resourceID) + if err != nil { + return fmt.Errorf("in project resource %d: %w", resourceID, err) + } + servicesWithUpdatedQuota[serviceID] = struct{}{} + } } } return nil @@ -186,8 +197,13 @@ func ApplyComputedProjectQuota(serviceType db.ServiceType, resourceName liquid.R quotasByResourceID[resourceID] += projectTarget.Allocated } } - servicesWithUpdatedQuota := make(map[db.ProjectServiceID]struct{}) + err = sqlext.WithPreparedStatement(tx, acpqUpdateProjectQuotaQuery, func(stmt *sql.Stmt) error { + // Skip resources with AZSeparatedResourceTopology. The quota scrape would report a nil value at the resource level, while ACPQ calculates a quota. + // This would lead to unnecessary quota syncs with the backend, because backendQuota != quota. + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + return nil + } for resourceID, quota := range quotasByResourceID { var serviceID db.ProjectServiceID err := stmt.QueryRow(quota, resourceID).Scan(&serviceID) @@ -286,7 +302,7 @@ type acpqGlobalTarget map[limes.AvailabilityZone]acpqAZTarget // effects (reading the DB, writing the DB, setting quota in the backend). // This function is separate because most test cases work on this level. // The full ApplyComputedProjectQuota() function is tested during capacity scraping. -func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints) (target acpqGlobalTarget, allowsQuotaOvercommit map[limes.AvailabilityZone]bool) { +func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, resInfo liquid.ResourceInfo) (target acpqGlobalTarget, allowsQuotaOvercommit map[limes.AvailabilityZone]bool) { // enumerate which project resource IDs and AZs are relevant // ("Relevant" AZs are all that have allocation stats available.)
isProjectResourceID := make(map[db.ProjectResourceID]struct{}) @@ -300,7 +316,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats } } slices.Sort(allAZsInOrder) - if cfg.ProjectBaseQuota > 0 { + if cfg.ProjectBaseQuota > 0 && resInfo.Topology != liquid.AZSeparatedResourceTopology { // base quota is given out in the pseudo-AZ "any", so we need to calculate quota for "any", too isRelevantAZ[limes.AvailabilityZoneAny] = struct{}{} } @@ -321,7 +337,7 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats // in AZ-aware resources, quota for the pseudo-AZ "any" is backed by capacity // in all the real AZs, so it can only allow quota overcommit if all AZs do - if isAZAware { + if isAZAware && resInfo.Topology != liquid.AZSeparatedResourceTopology { allowsQuotaOvercommit[limes.AvailabilityZoneAny] = allRealAZsAllowQuotaOvercommit } @@ -374,7 +390,14 @@ func acpqComputeQuotas(stats map[limes.AvailabilityZone]clusterAZAllocationStats } } if sumOfLocalizedQuotas < cfg.ProjectBaseQuota { - target[limes.AvailabilityZoneAny][resourceID].Desired = cfg.ProjectBaseQuota - sumOfLocalizedQuotas + // AZ separated topology receives the basequota to all available AZs + if resInfo.Topology == liquid.AZSeparatedResourceTopology { + for az := range isRelevantAZ { + target[az][resourceID].Desired = cfg.ProjectBaseQuota + } + } else { + target[limes.AvailabilityZoneAny][resourceID].Desired = cfg.ProjectBaseQuota - sumOfLocalizedQuotas + } } } if !slices.Contains(allAZsInOrder, limes.AvailabilityZoneAny) { diff --git a/internal/datamodel/apply_computed_project_quota_test.go b/internal/datamodel/apply_computed_project_quota_test.go index ef48d17e..c8503e7d 100644 --- a/internal/datamodel/apply_computed_project_quota_test.go +++ b/internal/datamodel/apply_computed_project_quota_test.go @@ -24,6 +24,7 @@ import ( "testing" "github.com/sapcc/go-api-declarations/limes" + "github.com/sapcc/go-api-declarations/liquid" "github.com/sapcc/limes/internal/core" "github.com/sapcc/limes/internal/db" @@ -71,7 +72,7 @@ func TestACPQBasicWithoutAZAwareness(t *testing.T) { 405: {Allocated: 10}, 406: {Allocated: 10}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) } } @@ -156,7 +157,69 @@ func TestACPQBasicWithAZAwareness(t *testing.T) { 406: {Allocated: 10}, 407: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) + } +} + +func TestACPQBasicWithAZSeparated(t *testing.T) { + input := map[limes.AvailabilityZone]clusterAZAllocationStats{ + "az-one": { + Capacity: 200, + ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{ + // 401 and 402 are boring base cases with usage only in one AZ or both AZs, respectively + 401: constantUsage(20), + 402: constantUsage(20), + // 403 tests how growth multiplier follows historical usage + 403: {Usage: 30, MinHistoricalUsage: 28, MaxHistoricalUsage: 30}, + // 404 tests how historical usage limits quota shrinking + 404: {Usage: 5, MinHistoricalUsage: 5, MaxHistoricalUsage: 20}, + // 405 tests how commitment guarantees quota even with low usage, + // and also that usage in one AZ does not reflect commitments in another + 405: {Committed: 60, Usage: 10, MinHistoricalUsage: 8, MaxHistoricalUsage: 12}, + // 406 and 407 test the application of base quota in "any" + 406: constantUsage(0), + 407: constantUsage(2), + }, + }, + "az-two": { + Capacity: 200, + ProjectStats: map[db.ProjectResourceID]projectAZAllocationStats{ + 401: constantUsage(20), + 402: constantUsage(0), + 
403: {Usage: 20, MinHistoricalUsage: 19, MaxHistoricalUsage: 20}, + 404: {Usage: 0, MinHistoricalUsage: 0, MaxHistoricalUsage: 15}, + 405: constantUsage(40), + 406: constantUsage(0), + 407: constantUsage(1), + }, + }, + } + cfg := core.AutogrowQuotaDistributionConfiguration{ + GrowthMultiplier: 1.2, + ProjectBaseQuota: 10, + } + + for _, cfg.AllowQuotaOvercommitUntilAllocatedPercent = range []float64{0, 10000} { + expectACPQResult(t, input, cfg, nil, acpqGlobalTarget{ + "az-one": { + 401: {Allocated: 24}, + 402: {Allocated: 24}, + 403: {Allocated: 33}, // 28 * 1.2 = 33.6 + 404: {Allocated: 20}, + 405: {Allocated: 72}, // 60 * 1.2 = 72 + 406: {Allocated: 10}, // Basequota + 407: {Allocated: 10}, // Basequota + }, + "az-two": { + 401: {Allocated: 24}, + 402: {Allocated: 0}, + 403: {Allocated: 22}, // 19 * 1.2 = 22.8 + 404: {Allocated: 15}, + 405: {Allocated: 48}, // 40 * 1.2 = 48 + 406: {Allocated: 10}, // Basequota + 407: {Allocated: 10}, // Basequota + }, + }, liquid.ResourceInfo{Topology: liquid.AZSeparatedResourceTopology}) } } @@ -199,7 +262,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 5}, 405: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 2: There is enough capacity for the minimum quotas, but not for the // desired quotas. @@ -218,7 +281,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 3: There is enough capacity for the hard minimum quotas, but not for // the soft minimum quotas. @@ -237,7 +300,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) // Stage 4: Capacity is SOMEHOW not even enough for the hard minimum quotas. 
input["any"] = clusterAZAllocationStats{ @@ -255,7 +318,7 @@ func TestACPQCapacityLimitsQuotaAllocation(t *testing.T) { 404: {Allocated: 0}, 405: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.FlatResourceTopology}) } func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { @@ -306,7 +369,7 @@ func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { 404: {Allocated: 10}, 405: {Allocated: 10}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with quota overcommit forbidden (85% allocation is above 80%) cfg.AllowQuotaOvercommitUntilAllocatedPercent = 80 @@ -326,7 +389,7 @@ func TestACPQQuotaOvercommitTurnsOffAboveAllocationThreshold(t *testing.T) { 404: {}, 405: {}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { @@ -374,7 +437,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 36}, 402: {Allocated: 16}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MinQuota constraints // @@ -400,7 +463,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 0}, 402: {Allocated: 16}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MaxQuota constraints that constrain the soft minimum (hard minimum is not constrainable) constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{ @@ -420,7 +483,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 0}, 402: {Allocated: 0}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) // test with MaxQuota constraints that constrain the base quota constraints = map[db.ProjectResourceID]projectLocalQuotaConstraints{ @@ -440,7 +503,7 @@ func TestACPQWithProjectLocalQuotaConstraints(t *testing.T) { 401: {Allocated: 26}, 402: {Allocated: 6}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } func TestEmptyRegionDoesNotPrecludeQuotaOvercommit(t *testing.T) { @@ -530,7 +593,7 @@ func TestEmptyRegionDoesNotPrecludeQuotaOvercommit(t *testing.T) { 404: {Allocated: 5}, 405: {Allocated: 5}, }, - }) + }, liquid.ResourceInfo{Topology: liquid.AZAwareResourceTopology}) } // Shortcut to avoid repetition in projectAZAllocationStats literals. 
@@ -547,9 +610,9 @@ func withCommitted(committed uint64, stats projectAZAllocationStats) projectAZAl return stats } -func expectACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget) { +func expectACPQResult(t *testing.T, input map[limes.AvailabilityZone]clusterAZAllocationStats, cfg core.AutogrowQuotaDistributionConfiguration, constraints map[db.ProjectResourceID]projectLocalQuotaConstraints, expected acpqGlobalTarget, resourceInfo liquid.ResourceInfo) { t.Helper() - actual, _ := acpqComputeQuotas(input, cfg, constraints) + actual, _ := acpqComputeQuotas(input, cfg, constraints, resourceInfo) // normalize away any left-over intermediate values for _, azTarget := range actual { for _, projectTarget := range azTarget { diff --git a/internal/db/migrations.go b/internal/db/migrations.go index a08ba781..ea5659fb 100644 --- a/internal/db/migrations.go +++ b/internal/db/migrations.go @@ -179,4 +179,12 @@ var sqlMigrations = map[string]string{ ALTER TABLE project_resources RENAME COLUMN max_quota_from_admin TO max_quota_from_outside_admin; `, + "046_az_backend_quota.down.sql": ` + ALTER TABLE project_az_resources + DROP COLUMN backend_quota; + `, + "046_az_backend_quota.up.sql": ` + ALTER TABLE project_az_resources + ADD COLUMN backend_quota BIGINT default NULL; + `, } diff --git a/internal/db/models.go b/internal/db/models.go index 335f457e..cd976ca9 100644 --- a/internal/db/models.go +++ b/internal/db/models.go @@ -138,6 +138,7 @@ type ProjectAZResource struct { ResourceID ProjectResourceID `db:"resource_id"` AvailabilityZone limes.AvailabilityZone `db:"az"` Quota *uint64 `db:"quota"` + BackendQuota *int64 `db:"backend_quota"` Usage uint64 `db:"usage"` PhysicalUsage *uint64 `db:"physical_usage"` SubresourcesJSON string `db:"subresources"` diff --git a/internal/plugins/capacity_liquid.go b/internal/plugins/capacity_liquid.go index 059154c3..b487cf45 100644 --- a/internal/plugins/capacity_liquid.go +++ b/internal/plugins/capacity_liquid.go @@ -72,6 +72,10 @@ func (p *liquidCapacityPlugin) Init(ctx context.Context, client *gophercloud.Pro return fmt.Errorf("cannot initialize ServiceClient for %s: %w", p.LiquidServiceType, err) } p.LiquidServiceInfo, err = p.LiquidClient.GetInfo(ctx) + if err != nil { + return err + } + err = CheckResourceTopologies(p.LiquidServiceInfo) return err } @@ -90,6 +94,19 @@ func (p *liquidCapacityPlugin) Scrape(ctx context.Context, backchannel core.Capa logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } + resourceNames := SortedMapKeys(p.LiquidServiceInfo.Resources) + var errs []error + for _, resourceName := range resourceNames { + perAZ := resp.Resources[resourceName].PerAZ + topology := p.LiquidServiceInfo.Resources[resourceName].Topology + err := MatchLiquidReportToTopology(perAZ, topology) + if err != nil { + errs = append(errs, fmt.Errorf("resource: %s: %w", resourceName, err)) + } + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) 
+ } resultInService := make(map[liquid.ResourceName]core.PerAZ[core.CapacityData], len(p.LiquidServiceInfo.Resources)) for resName, resInfo := range p.LiquidServiceInfo.Resources { diff --git a/internal/plugins/liquid.go b/internal/plugins/liquid.go index 23e600e3..c6105de5 100644 --- a/internal/plugins/liquid.go +++ b/internal/plugins/liquid.go @@ -83,6 +83,10 @@ func (p *liquidQuotaPlugin) Init(ctx context.Context, client *gophercloud.Provid return fmt.Errorf("cannot initialize ServiceClient for liquid-%s: %w", serviceType, err) } p.LiquidServiceInfo, err = p.LiquidClient.GetInfo(ctx) + if err != nil { + return err + } + err = CheckResourceTopologies(p.LiquidServiceInfo) return err } @@ -119,9 +123,22 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro logg.Fatal("ServiceInfo version for %s changed from %d to %d; restarting now to reload ServiceInfo...", p.LiquidServiceType, p.LiquidServiceInfo.Version, resp.InfoVersion) } + resourceNames := SortedMapKeys(p.LiquidServiceInfo.Resources) + var errs []error + for _, resourceName := range resourceNames { + perAZ := resp.Resources[resourceName].PerAZ + topology := p.LiquidServiceInfo.Resources[resourceName].Topology + err := MatchLiquidReportToTopology(perAZ, topology) + if err != nil { + errs = append(errs, fmt.Errorf("resource: %s: %w", resourceName, err)) + } + } + if len(errs) > 0 { + return nil, nil, errors.Join(errs...) + } result = make(map[liquid.ResourceName]core.ResourceData, len(p.LiquidServiceInfo.Resources)) - for resName := range p.LiquidServiceInfo.Resources { + for resName, resInfo := range p.LiquidServiceInfo.Resources { resReport := resp.Resources[resName] if resReport == nil { return nil, nil, fmt.Errorf("missing report for resource %q", resName) @@ -142,6 +159,9 @@ func (p *liquidQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePro PhysicalUsage: azReport.PhysicalUsage, Subresources: castSliceToAny(azReport.Subresources), } + if resInfo.Topology == liquid.AZSeparatedResourceTopology && azReport.Quota != nil { + resData.UsageData[az].Quota = azReport.Quota + } } result[resName] = resData @@ -174,13 +194,8 @@ func castSliceToAny[T any](input []T) (output []any) { } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { - req := liquid.ServiceQuotaRequest{ - Resources: make(map[liquid.ResourceName]liquid.ResourceQuotaRequest, len(quotas)), - } - for resName, quota := range quotas { - req.Resources[resName] = liquid.ResourceQuotaRequest{Quota: quota} - } +func (p *liquidQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { + req := liquid.ServiceQuotaRequest{Resources: quotaReq} if p.LiquidServiceInfo.QuotaUpdateNeedsProjectMetadata { req.ProjectMetadata = project.ForLiquid() } diff --git a/internal/plugins/nova.go b/internal/plugins/nova.go index 81df22af..823adb1e 100644 --- a/internal/plugins/nova.go +++ b/internal/plugins/nova.go @@ -60,11 +60,11 @@ type novaPlugin struct { } var novaDefaultResources = map[liquid.ResourceName]liquid.ResourceInfo{ - "cores": {Unit: limes.UnitNone, HasQuota: true}, - "instances": {Unit: limes.UnitNone, HasQuota: true}, - "ram": {Unit: limes.UnitMebibytes, HasQuota: true}, - "server_groups": {Unit: limes.UnitNone, HasQuota: true}, - "server_group_members": {Unit: limes.UnitNone, HasQuota: true}, + "cores": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "instances": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "ram": {Unit: limes.UnitMebibytes, HasQuota: true, Topology: liquid.AZAwareResourceTopology}, + "server_groups": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.FlatResourceTopology}, + "server_group_members": {Unit: limes.UnitNone, HasQuota: true, Topology: liquid.FlatResourceTopology}, } func init() { @@ -121,6 +121,7 @@ func (p *novaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderCli p.resources[liquid.ResourceName(resourceName)] = liquid.ResourceInfo{ Unit: unit, HasQuota: true, + Topology: liquid.AZAwareResourceTopology, } } @@ -134,6 +135,7 @@ func (p *novaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderCli p.resources[resName] = liquid.ResourceInfo{ Unit: limes.UnitNone, HasQuota: true, + Topology: liquid.AZAwareResourceTopology, } } } @@ -345,11 +347,11 @@ func (p *novaPlugin) pooledResourceName(hwVersion string, base liquid.ResourceNa } // SetQuota implements the core.QuotaPlugin interface. 
-func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error { +func (p *novaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotaReq map[liquid.ResourceName]liquid.ResourceQuotaRequest) error { // translate Limes resource names for separate instance quotas into Nova quota names - novaQuotas := make(novaQuotaUpdateOpts, len(quotas)) - for resourceName, quota := range quotas { - novaQuotas[string(resourceName)] = quota + novaQuotas := make(novaQuotaUpdateOpts, len(quotaReq)) + for resourceName, request := range quotaReq { + novaQuotas[string(resourceName)] = request.Quota } return quotasets.Update(ctx, p.NovaV2, project.UUID, novaQuotas).Err diff --git a/internal/plugins/utils.go b/internal/plugins/utils.go index 405b21ce..944e9723 100644 --- a/internal/plugins/utils.go +++ b/internal/plugins/utils.go @@ -19,6 +19,71 @@ package plugins +import ( + "errors" + "fmt" + "maps" + "slices" + + "github.com/sapcc/go-api-declarations/liquid" + "github.com/sapcc/go-bits/logg" +) + func p2u64(val uint64) *uint64 { return &val } + +func SortedMapKeys[M map[K]V, K ~string, V any](mapToSort M) []K { + sortedKeys := slices.Collect(maps.Keys(mapToSort)) + slices.Sort(sortedKeys) + return sortedKeys +} + +func CheckResourceTopologies(serviceInfo liquid.ServiceInfo) (err error) { + var errs []error + resources := serviceInfo.Resources + + resourceNames := SortedMapKeys(resources) + for _, resourceName := range resourceNames { + topology := resources[resourceName].Topology + if topology == "" { + // several algorithms inside Limes depend on a topology being chosen, so we have to pick a default for now + // TODO: make this a fatal error once liquid-ceph has rolled out their Topology patch + logg.Error("missing topology on resource: %s (assuming %q)", resourceName, liquid.FlatResourceTopology) + resInfo := resources[resourceName] + resInfo.Topology = liquid.FlatResourceTopology + resources[resourceName] = resInfo + } + if !topology.IsValid() { + errs = append(errs, fmt.Errorf("invalid topology: %s on resource: %s", topology, resourceName)) + } + } + if len(errs) > 0 { + return errors.Join(errs...) + } + return +} + +func MatchLiquidReportToTopology[V any](perAZReport map[liquid.AvailabilityZone]V, topology liquid.ResourceTopology) (err error) { + _, anyExists := perAZReport[liquid.AvailabilityZoneAny] + _, unknownExists := perAZReport[liquid.AvailabilityZoneUnknown] + switch topology { + case liquid.FlatResourceTopology: + if len(perAZReport) == 1 && anyExists { + return + } + case liquid.AZAwareResourceTopology: + if len(perAZReport) > 0 && !anyExists { + return + } + case liquid.AZSeparatedResourceTopology: + if len(perAZReport) > 0 && !anyExists && !unknownExists { + return + } + case "": + return + } + + reportedAZs := SortedMapKeys(perAZReport) + return fmt.Errorf("scrape with topology type: %s returned AZs: %v", topology, reportedAZs) +} diff --git a/internal/test/plugins/quota_generic.go b/internal/test/plugins/quota_generic.go index 7059ecfe..1373b07c 100644 --- a/internal/test/plugins/quota_generic.go +++ b/internal/test/plugins/quota_generic.go @@ -34,6 +34,7 @@ import ( "github.com/sapcc/limes/internal/core" "github.com/sapcc/limes/internal/db" + "github.com/sapcc/limes/internal/plugins" ) func init() { @@ -44,38 +45,42 @@ func init() { // mostly reports static data and offers several controls to simulate failed // operations. 
 type GenericQuotaPlugin struct {
-    ServiceType              db.ServiceType                              `yaml:"-"`
-    StaticRateInfos          map[liquid.RateName]liquid.RateInfo         `yaml:"rate_infos"`
-    StaticResourceData       map[liquid.ResourceName]*core.ResourceData  `yaml:"-"`
-    StaticResourceAttributes map[liquid.ResourceName]map[string]any      `yaml:"-"`
-    OverrideQuota            map[string]map[liquid.ResourceName]uint64   `yaml:"-"` // first key is project UUID
+    ServiceType              db.ServiceType                                                  `yaml:"-"`
+    LiquidServiceInfo        liquid.ServiceInfo                                              `yaml:"-"`
+    StaticRateInfos          map[liquid.RateName]liquid.RateInfo                             `yaml:"rate_infos"`
+    StaticResourceData       map[liquid.ResourceName]*core.ResourceData                      `yaml:"-"`
+    StaticResourceAttributes map[liquid.ResourceName]map[string]any                          `yaml:"-"`
+    OverrideQuota            map[string]map[liquid.ResourceName]liquid.ResourceQuotaRequest `yaml:"-"` // first key is project UUID
 
     // behavior flags that can be set by a unit test
-    ScrapeFails   bool                           `yaml:"-"`
-    SetQuotaFails bool                           `yaml:"-"`
-    MinQuota      map[liquid.ResourceName]uint64 `yaml:"-"`
-    MaxQuota      map[liquid.ResourceName]uint64 `yaml:"-"`
+    ReportedAZs   map[liquid.AvailabilityZone]struct{} `yaml:"-"`
+    ScrapeFails   bool                                 `yaml:"-"`
+    SetQuotaFails bool                                 `yaml:"-"`
+    MinQuota      map[liquid.ResourceName]uint64       `yaml:"-"`
+    MaxQuota      map[liquid.ResourceName]uint64       `yaml:"-"`
 }
 
 // Init implements the core.QuotaPlugin interface.
 func (p *GenericQuotaPlugin) Init(ctx context.Context, provider *gophercloud.ProviderClient, eo gophercloud.EndpointOpts, serviceType db.ServiceType) error {
     p.ServiceType = serviceType
+    thingsAZQuota := int64(21)
+    capacityAZQuota := int64(50)
     p.StaticResourceData = map[liquid.ResourceName]*core.ResourceData{
         "things": {
             Quota: 42,
             UsageData: core.PerAZ[core.UsageData]{
-                "az-one": {Usage: 2},
-                "az-two": {Usage: 2},
+                "az-one": {Usage: 2, Quota: &thingsAZQuota},
+                "az-two": {Usage: 2, Quota: &thingsAZQuota},
             },
         },
         "capacity": {
             Quota: 100,
             UsageData: core.PerAZ[core.UsageData]{
-                "az-one": {Usage: 0},
-                "az-two": {Usage: 0},
+                "az-one": {Usage: 0, Quota: &capacityAZQuota},
+                "az-two": {Usage: 0, Quota: &capacityAZQuota},
             },
         },
     }
-    p.OverrideQuota = make(map[string]map[liquid.ResourceName]uint64)
+    p.OverrideQuota = make(map[string]map[liquid.ResourceName]liquid.ResourceQuotaRequest)
     return nil
 }
 
@@ -95,9 +100,9 @@ func (p *GenericQuotaPlugin) ServiceInfo() core.ServiceInfo {
 // Resources implements the core.QuotaPlugin interface.
 func (p *GenericQuotaPlugin) Resources() map[liquid.ResourceName]liquid.ResourceInfo {
     result := map[liquid.ResourceName]liquid.ResourceInfo{
-        "capacity":         {Unit: limes.UnitBytes, HasQuota: true},
-        "capacity_portion": {Unit: limes.UnitBytes, HasQuota: false}, // NOTE: This used to be `ContainedIn: "capacity"` before we removed support for this relation.
-        "things":           {Unit: limes.UnitNone, HasQuota: true},
+        "capacity":         {Unit: limes.UnitBytes, HasQuota: true, Topology: p.LiquidServiceInfo.Resources["capacity"].Topology},
+        "capacity_portion": {Unit: limes.UnitBytes, HasQuota: false, Topology: p.LiquidServiceInfo.Resources["capacity_portion"].Topology}, // NOTE: This used to be `ContainedIn: "capacity"` before we removed support for this relation.
+        "things":           {Unit: limes.UnitNone, HasQuota: true, Topology: p.LiquidServiceInfo.Resources["things"].Topology},
     }
 
     for resName, resInfo := range result {
@@ -160,6 +165,28 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr
         return nil, nil, errors.New("Scrape failed as requested")
     }
 
+    if len(p.LiquidServiceInfo.Resources) > 0 {
+        err := plugins.CheckResourceTopologies(p.LiquidServiceInfo)
+        if err != nil {
+            return nil, nil, err
+        }
+    }
+
+    if len(p.ReportedAZs) > 0 {
+        var errs []error
+        resourceNames := plugins.SortedMapKeys(p.LiquidServiceInfo.Resources)
+        for _, resourceName := range resourceNames {
+            topology := p.LiquidServiceInfo.Resources[resourceName].Topology
+            err := plugins.MatchLiquidReportToTopology(p.ReportedAZs, topology)
+            if err != nil {
+                errs = append(errs, fmt.Errorf("service: %s, resource: %s: %w", p.ServiceType, resourceName, err))
+            }
+        }
+        if len(errs) > 0 {
+            return nil, nil, errors.Join(errs...)
+        }
+    }
+
     result = make(map[liquid.ResourceName]core.ResourceData)
     for key, val := range p.StaticResourceData {
         copyOfVal := core.ResourceData{
@@ -167,6 +194,11 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr
             UsageData: val.UsageData.Clone(),
         }
 
+        // populate azSeparatedQuota
+        for az, data := range copyOfVal.UsageData {
+            data.Quota = val.UsageData[az].Quota
+        }
+
         // test coverage for PhysicalUsage != Usage
         if key == "capacity" {
             for _, data := range copyOfVal.UsageData {
@@ -201,7 +233,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr
     if exists {
         for resourceName, quota := range data {
             resData := result[resourceName]
-            resData.Quota = int64(quota) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63
+            resData.Quota = int64(quota.Quota) //nolint:gosec // uint64 -> int64 would only fail if quota is bigger than 2^63
             result[resourceName] = resData
         }
     }
@@ -229,7 +261,7 @@ func (p *GenericQuotaPlugin) Scrape(ctx context.Context, project core.KeystonePr
 }
 
 // SetQuota implements the core.QuotaPlugin interface.
-func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error {
+func (p *GenericQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]liquid.ResourceQuotaRequest) error {
     if p.SetQuotaFails {
         return errors.New("SetQuota failed as requested")
     }
diff --git a/internal/test/plugins/quota_noop.go b/internal/test/plugins/quota_noop.go
index c2ce8f9c..dbd4734d 100644
--- a/internal/test/plugins/quota_noop.go
+++ b/internal/test/plugins/quota_noop.go
@@ -120,6 +120,6 @@ func (p *NoopQuotaPlugin) BuildServiceUsageRequest(project core.KeystoneProject,
 }
 
 // SetQuota implements the core.QuotaPlugin interface.
-func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]uint64) error {
+func (p *NoopQuotaPlugin) SetQuota(ctx context.Context, project core.KeystoneProject, quotas map[liquid.ResourceName]liquid.ResourceQuotaRequest) error {
     return nil
 }
diff --git a/main.go b/main.go
index f86f7a74..b5378277 100644
--- a/main.go
+++ b/main.go
@@ -453,7 +453,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string)
     project := must.Return(findProjectForTesting(ctx, cluster, args[0]))
 
     quotaValueRx := regexp.MustCompile(`^([^=]+)=(\d+)$`)
-    quotaValues := make(map[liquid.ResourceName]uint64)
+    quotaValues := make(map[liquid.ResourceName]liquid.ResourceQuotaRequest)
     for _, arg := range args[2:] {
         match := quotaValueRx.FindStringSubmatch(arg)
         if match == nil {
@@ -463,7 +463,7 @@ func taskTestSetQuota(ctx context.Context, cluster *core.Cluster, args []string)
         if err != nil {
             logg.Fatal(err.Error())
         }
-        quotaValues[liquid.ResourceName(match[1])] = val
+        quotaValues[liquid.ResourceName(match[1])] = liquid.ResourceQuotaRequest{Quota: val}
     }
 
     must.Succeed(cluster.QuotaPlugins[serviceType].SetQuota(ctx, project, quotaValues))
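
The topology checks are the part of this change most likely to trip up plugin authors, so here is a rough usage sketch. It is not part of the patch: it assumes a file living inside the Limes module (since `internal/plugins` cannot be imported from outside), and the AZ names and values are illustrative only.

```go
package main

import (
	"fmt"

	"github.com/sapcc/go-api-declarations/liquid"

	"github.com/sapcc/limes/internal/plugins"
)

func main() {
	// a hypothetical per-AZ usage report, shaped like what a Scrape implementation returns
	perAZ := map[liquid.AvailabilityZone]uint64{
		"az-one": 2,
		"az-two": 2,
	}

	// an AZ-aware resource may report real AZs (but not the pseudo-AZ "any"),
	// so this check passes and err is nil
	err := plugins.MatchLiquidReportToTopology(perAZ, liquid.AZAwareResourceTopology)
	fmt.Println(err)

	// a flat resource must report exactly the pseudo-AZ "any",
	// so this check returns a mismatch error listing the reported AZs
	err = plugins.MatchLiquidReportToTopology(perAZ, liquid.FlatResourceTopology)
	fmt.Println(err)
}
```

The generic test plugin's new `ReportedAZs` field exercises exactly this path in `Scrape`, which is what the updated scrape tests rely on.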